python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/sched/loadavg.h>
#include <asm/auxio.h>
#define LED_MAX_LENGTH 8 /* maximum chars written to proc file */
static inline void led_toggle(void)
{
unsigned char val = get_auxio();
unsigned char on, off;
if (val & AUXIO_LED) {
on = 0;
off = AUXIO_LED;
} else {
on = AUXIO_LED;
off = 0;
}
set_auxio(on, off);
}
static struct timer_list led_blink_timer;
static unsigned long led_blink_timer_timeout;
static void led_blink(struct timer_list *unused)
{
unsigned long timeout = led_blink_timer_timeout;
led_toggle();
/* reschedule */
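/* avenrun[0] holds the 1-minute load average in fixed point;
 * shifting right by FSHIFT keeps only the integer part, so the
 * reschedule below waits (1 + load) seconds: the busier the
 * machine, the slower the blink.
 */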
if (!timeout) { /* blink according to load */
led_blink_timer.expires = jiffies +
((1 + (avenrun[0] >> FSHIFT)) * HZ);
} else { /* blink at user specified interval */
led_blink_timer.expires = jiffies + (timeout * HZ);
}
add_timer(&led_blink_timer);
}
#ifdef CONFIG_PROC_FS
static int led_proc_show(struct seq_file *m, void *v)
{
if (get_auxio() & AUXIO_LED)
seq_puts(m, "on\n");
else
seq_puts(m, "off\n");
return 0;
}
static int led_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, led_proc_show, NULL);
}
static ssize_t led_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
char *buf = NULL;
if (count > LED_MAX_LENGTH)
count = LED_MAX_LENGTH;
buf = memdup_user_nul(buffer, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
/* work around \n when echo'ing into proc */
if (count && buf[count - 1] == '\n')
buf[count - 1] = '\0';
/* Before we change anything we want to stop any running timers,
 * otherwise commands such as "on" would have no persistent effect
*/
del_timer_sync(&led_blink_timer);
if (!strcmp(buf, "on")) {
auxio_set_led(AUXIO_LED_ON);
} else if (!strcmp(buf, "toggle")) {
led_toggle();
} else if ((*buf > '0') && (*buf <= '9')) {
led_blink_timer_timeout = simple_strtoul(buf, NULL, 10);
led_blink(&led_blink_timer);
} else if (!strcmp(buf, "load")) {
led_blink_timer_timeout = 0;
led_blink(&led_blink_timer);
} else {
auxio_set_led(AUXIO_LED_OFF);
}
kfree(buf);
return count;
}
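/* Usage sketch (assumes CONFIG_PROC_FS; any unrecognized string
 * switches the LED off):
 *
 *	echo on > /proc/led	LED permanently on
 *	echo toggle > /proc/led	flip the current state
 *	echo 3 > /proc/led	blink every 3 seconds
 *	echo load > /proc/led	blink according to load average
 */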
static const struct proc_ops led_proc_ops = {
.proc_open = led_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = led_proc_write,
};
#endif
#define LED_VERSION "0.1"
static int __init led_init(void)
{
timer_setup(&led_blink_timer, led_blink, 0);
#ifdef CONFIG_PROC_FS
if (!proc_create("led", 0, NULL, &led_proc_ops))
return -ENOMEM;
#endif
printk(KERN_INFO
"led: version %s, Lars Kotthoff <[email protected]>\n",
LED_VERSION);
return 0;
}
static void __exit led_exit(void)
{
remove_proc_entry("led", NULL);
del_timer_sync(&led_blink_timer);
}
module_init(led_init);
module_exit(led_exit);
MODULE_AUTHOR("Lars Kotthoff <[email protected]>");
MODULE_DESCRIPTION("Provides control of the front LED on SPARC systems.");
MODULE_LICENSE("GPL");
MODULE_VERSION(LED_VERSION);
| linux-master | arch/sparc/kernel/led.c |
// SPDX-License-Identifier: GPL-2.0
/* hvapi.c: Hypervisor API management.
*
* Copyright (C) 2007 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <asm/hypervisor.h>
#include <asm/oplib.h>
/* If the hypervisor indicates that the API setting
* calls are unsupported, by returning HV_EBADTRAP or
* HV_ENOTSUPPORTED, we assume that API groups with the
* PRE_API flag set are major 1 minor 0.
*/
struct api_info {
unsigned long group;
unsigned long major;
unsigned long minor;
unsigned int refcnt;
unsigned int flags;
#define FLAG_PRE_API 0x00000001
};
static struct api_info api_table[] = {
{ .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
{ .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
{ .group = HV_GRP_INTR, },
{ .group = HV_GRP_SOFT_STATE, },
{ .group = HV_GRP_TM, },
{ .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
{ .group = HV_GRP_LDOM, },
{ .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
{ .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
{ .group = HV_GRP_RNG, },
{ .group = HV_GRP_PBOOT, },
{ .group = HV_GRP_TPM, },
{ .group = HV_GRP_SDIO, },
{ .group = HV_GRP_SDIO_ERR, },
{ .group = HV_GRP_REBOOT_DATA, },
{ .group = HV_GRP_ATU, .flags = FLAG_PRE_API },
{ .group = HV_GRP_DAX, },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
{ .group = HV_GRP_NIU, },
{ .group = HV_GRP_VF_CPU, },
{ .group = HV_GRP_KT_CPU, },
{ .group = HV_GRP_VT_CPU, },
{ .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
{ .group = HV_GRP_M7_PERF, },
};
static DEFINE_SPINLOCK(hvapi_lock);
static struct api_info *__get_info(unsigned long group)
{
int i;
for (i = 0; i < ARRAY_SIZE(api_table); i++) {
if (api_table[i].group == group)
return &api_table[i];
}
return NULL;
}
static void __get_ref(struct api_info *p)
{
p->refcnt++;
}
static void __put_ref(struct api_info *p)
{
if (--p->refcnt == 0) {
unsigned long ignore;
sun4v_set_version(p->group, 0, 0, &ignore);
p->major = p->minor = 0;
}
}
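/* Both helpers above run with hvapi_lock held, so the plain
 * refcount arithmetic needs no atomics; dropping the last
 * reference renegotiates the group back to major 0.
 */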
/* Register a hypervisor API specification. It indicates the
* API group and desired major+minor.
*
* If an existing API registration exists, '0' (success) will
* be returned if it is compatible with the one being registered.
* Otherwise a negative error code will be returned.
*
* Otherwise an attempt will be made to negotiate the requested
* API group/major/minor with the hypervisor, and errors returned
* if that does not succeed.
*/
int sun4v_hvapi_register(unsigned long group, unsigned long major,
unsigned long *minor)
{
struct api_info *p;
unsigned long flags;
int ret;
spin_lock_irqsave(&hvapi_lock, flags);
p = __get_info(group);
ret = -EINVAL;
if (p) {
if (p->refcnt) {
ret = -EINVAL;
if (p->major == major) {
*minor = p->minor;
ret = 0;
}
} else {
unsigned long actual_minor;
unsigned long hv_ret;
hv_ret = sun4v_set_version(group, major, *minor,
&actual_minor);
ret = -EINVAL;
if (hv_ret == HV_EOK) {
*minor = actual_minor;
p->major = major;
p->minor = actual_minor;
ret = 0;
} else if (hv_ret == HV_EBADTRAP ||
hv_ret == HV_ENOTSUPPORTED) {
if (p->flags & FLAG_PRE_API) {
if (major == 1) {
p->major = 1;
p->minor = 0;
*minor = 0;
ret = 0;
}
}
}
}
if (ret == 0)
__get_ref(p);
}
spin_unlock_irqrestore(&hvapi_lock, flags);
return ret;
}
EXPORT_SYMBOL(sun4v_hvapi_register);
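/* Caller sketch (illustrative, not from this file): negotiate
 * major 1 of the interrupt API and report the granted minor.
 *
 *	unsigned long minor = 0;
 *
 *	if (sun4v_hvapi_register(HV_GRP_INTR, 1, &minor))
 *		pr_warn("sun4v: INTR API major 1 unavailable\n");
 *	else
 *		pr_info("sun4v: INTR API 1.%lu\n", minor);
 */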
void sun4v_hvapi_unregister(unsigned long group)
{
struct api_info *p;
unsigned long flags;
spin_lock_irqsave(&hvapi_lock, flags);
p = __get_info(group);
if (p)
__put_ref(p);
spin_unlock_irqrestore(&hvapi_lock, flags);
}
EXPORT_SYMBOL(sun4v_hvapi_unregister);
int sun4v_hvapi_get(unsigned long group,
unsigned long *major,
unsigned long *minor)
{
struct api_info *p;
unsigned long flags;
int ret;
spin_lock_irqsave(&hvapi_lock, flags);
ret = -EINVAL;
p = __get_info(group);
if (p && p->refcnt) {
*major = p->major;
*minor = p->minor;
ret = 0;
}
spin_unlock_irqrestore(&hvapi_lock, flags);
return ret;
}
EXPORT_SYMBOL(sun4v_hvapi_get);
void __init sun4v_hvapi_init(void)
{
unsigned long group, major, minor;
group = HV_GRP_SUN4V;
major = 1;
minor = 0;
if (sun4v_hvapi_register(group, major, &minor))
goto bad;
group = HV_GRP_CORE;
major = 1;
minor = 6;
if (sun4v_hvapi_register(group, major, &minor))
goto bad;
return;
bad:
prom_printf("HVAPI: Cannot register API group "
"%lx with major(%lu) minor(%lu)\n",
group, major, minor);
prom_halt();
}
| linux-master | arch/sparc/kernel/hvapi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sbus.c: UltraSparc SBUS controller support.
*
* Copyright (C) 1999 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/numa.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/starfire.h>
#include "iommu_common.h"
#define MAP_BASE ((u32)0xc0000000)
/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE 0x2400UL
#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
#define IOMMU_DRAM_VALID (1UL << 30UL)
/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE 0x2800UL
#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
#define STRBUF_TAG_VALID 0x02UL
/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct device *dev, int bursts)
{
struct iommu *iommu = dev->archdata.iommu;
struct platform_device *op = to_platform_device(dev);
const struct linux_prom_registers *regs;
unsigned long cfg_reg;
int slot;
u64 val;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (!regs) {
printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %pOF\n",
op->dev.of_node);
return;
}
slot = regs->which_io;
cfg_reg = iommu->write_complete_reg;
switch (slot) {
case 0:
cfg_reg += 0x20UL;
break;
case 1:
cfg_reg += 0x28UL;
break;
case 2:
cfg_reg += 0x30UL;
break;
case 3:
cfg_reg += 0x38UL;
break;
case 13:
cfg_reg += 0x40UL;
break;
case 14:
cfg_reg += 0x48UL;
break;
case 15:
cfg_reg += 0x50UL;
break;
default:
return;
}
val = upa_readq(cfg_reg);
if (val & (1UL << 14UL)) {
/* Extended transfer mode already enabled. */
return;
}
val |= (1UL << 14UL);
if (bursts & DMA_BURST8)
val |= (1UL << 1UL);
if (bursts & DMA_BURST16)
val |= (1UL << 2UL);
if (bursts & DMA_BURST32)
val |= (1UL << 3UL);
if (bursts & DMA_BURST64)
val |= (1UL << 4UL);
upa_writeq(val, cfg_reg);
}
EXPORT_SYMBOL(sbus_set_sbus64);
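/* Caller sketch (illustrative): an SBUS device driver would read
 * its "burst-sizes" property and request extended transfers with
 *
 *	int bursts = of_getintprop_default(dp, "burst-sizes", 0);
 *
 *	if (bursts & DMA_BURST64)
 *		sbus_set_sbus64(&op->dev, bursts);
 */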
/* INO number to IMAP register offset for SYSIO external IRQs.
* This should conform to both Sunfire/Wildfire server and Fusion
* desktop designs.
*/
#define SYSIO_IMAP_SLOT0 0x2c00UL
#define SYSIO_IMAP_SLOT1 0x2c08UL
#define SYSIO_IMAP_SLOT2 0x2c10UL
#define SYSIO_IMAP_SLOT3 0x2c18UL
#define SYSIO_IMAP_SCSI 0x3000UL
#define SYSIO_IMAP_ETH 0x3008UL
#define SYSIO_IMAP_BPP 0x3010UL
#define SYSIO_IMAP_AUDIO 0x3018UL
#define SYSIO_IMAP_PFAIL 0x3020UL
#define SYSIO_IMAP_KMS 0x3028UL
#define SYSIO_IMAP_FLPY 0x3030UL
#define SYSIO_IMAP_SHW 0x3038UL
#define SYSIO_IMAP_KBD 0x3040UL
#define SYSIO_IMAP_MS 0x3048UL
#define SYSIO_IMAP_SER 0x3050UL
#define SYSIO_IMAP_TIM0 0x3060UL
#define SYSIO_IMAP_TIM1 0x3068UL
#define SYSIO_IMAP_UE 0x3070UL
#define SYSIO_IMAP_CE 0x3078UL
#define SYSIO_IMAP_SBERR 0x3080UL
#define SYSIO_IMAP_PMGMT 0x3088UL
#define SYSIO_IMAP_GFX 0x3090UL
#define SYSIO_IMAP_EUPA 0x3098UL
#define bogon ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
/* SBUS Slot 0 --> 3, level 1 --> 7 */
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
/* Onboard devices (not relevant/used on SunFire). */
SYSIO_IMAP_SCSI,
SYSIO_IMAP_ETH,
SYSIO_IMAP_BPP,
bogon,
SYSIO_IMAP_AUDIO,
SYSIO_IMAP_PFAIL,
bogon,
bogon,
SYSIO_IMAP_KMS,
SYSIO_IMAP_FLPY,
SYSIO_IMAP_SHW,
SYSIO_IMAP_KBD,
SYSIO_IMAP_MS,
SYSIO_IMAP_SER,
bogon,
bogon,
SYSIO_IMAP_TIM0,
SYSIO_IMAP_TIM1,
bogon,
bogon,
SYSIO_IMAP_UE,
SYSIO_IMAP_CE,
SYSIO_IMAP_SBERR,
SYSIO_IMAP_PMGMT,
};
#undef bogon
#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
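/* INO layout implied by the table above: external slots encode
 * (slot << 3) | level with SBUS level 1-7, while INOs >= 0x20
 * index the onboard-device entries.
 */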
/* Convert Interrupt Mapping register pointer to associated
* Interrupt Clear register pointer, SYSIO specific version.
*/
#define SYSIO_ICLR_UNUSED0 0x3400UL
#define SYSIO_ICLR_SLOT0 0x3408UL
#define SYSIO_ICLR_SLOT1 0x3448UL
#define SYSIO_ICLR_SLOT2 0x3488UL
#define SYSIO_ICLR_SLOT3 0x34c8UL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
return imap + diff;
}
static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino)
{
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long imap, iclr;
int sbus_level = 0;
imap = sysio_irq_offsets[ino];
if (imap == ((unsigned long)-1)) {
prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
ino);
prom_halt();
}
imap += reg_base;
/* SYSIO inconsistency. For external SLOTS, we have to select
* the right ICLR register based upon the lower SBUS irq level
* bits.
*/
if (ino >= 0x20) {
iclr = sysio_imap_to_iclr(imap);
} else {
int sbus_slot = (ino & 0x18)>>3;
sbus_level = ino & 0x7;
switch(sbus_slot) {
case 0:
iclr = reg_base + SYSIO_ICLR_SLOT0;
break;
case 1:
iclr = reg_base + SYSIO_ICLR_SLOT1;
break;
case 2:
iclr = reg_base + SYSIO_ICLR_SLOT2;
break;
default:
case 3:
iclr = reg_base + SYSIO_ICLR_SLOT3;
break;
}
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
return build_irq(sbus_level, iclr, imap);
}
/* Error interrupt handling. */
#define SYSIO_UE_AFSR 0x0030UL
#define SYSIO_UE_AFAR 0x0038UL
#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
struct platform_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported, portid;
afsr_reg = reg_base + SYSIO_UE_AFSR;
afar_reg = reg_base + SYSIO_UE_AFAR;
/* Latch error status. */
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
portid,
(((error_bits & SYSIO_UEAFSR_PPIO) ?
"PIO" :
((error_bits & SYSIO_UEAFSR_PDRD) ?
"DVMA Read" :
((error_bits & SYSIO_UEAFSR_PDWR) ?
"DVMA Write" : "???")))));
printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
(afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
(afsr & SYSIO_UEAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary UE errors [", portid);
reported = 0;
if (afsr & SYSIO_UEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SYSIO_UEAFSR_SDRD) {
reported++;
printk("(DVMA Read)");
}
if (afsr & SYSIO_UEAFSR_SDWR) {
reported++;
printk("(DVMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SYSIO_CE_AFSR 0x0040UL
#define SYSIO_CE_AFAR 0x0048UL
#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
struct platform_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported, portid;
afsr_reg = reg_base + SYSIO_CE_AFSR;
afar_reg = reg_base + SYSIO_CE_AFAR;
/* Latch error status. */
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
portid,
(((error_bits & SYSIO_CEAFSR_PPIO) ?
"PIO" :
((error_bits & SYSIO_CEAFSR_PDRD) ?
"DVMA Read" :
((error_bits & SYSIO_CEAFSR_PDWR) ?
"DVMA Write" : "???")))));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
(afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
(afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
(afsr & SYSIO_CEAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary CE errors [", portid);
reported = 0;
if (afsr & SYSIO_CEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SYSIO_CEAFSR_SDRD) {
reported++;
printk("(DVMA Read)");
}
if (afsr & SYSIO_CEAFSR_SDWR) {
reported++;
printk("(DVMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SYSIO_SBUS_AFSR 0x2010UL
#define SYSIO_SBUS_AFAR 0x2018UL
#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
struct platform_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long afsr_reg, afar_reg, reg_base;
unsigned long afsr, afar, error_bits;
int reported, portid;
reg_base = iommu->write_complete_reg - 0x2000UL;
afsr_reg = reg_base + SYSIO_SBUS_AFSR;
afar_reg = reg_base + SYSIO_SBUS_AFAR;
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
portid,
(((error_bits & SYSIO_SBAFSR_PLE) ?
"Late PIO Error" :
((error_bits & SYSIO_SBAFSR_PTO) ?
"Time Out" :
((error_bits & SYSIO_SBAFSR_PBERR) ?
"Error Ack" : "???")))),
(afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
(afsr & SYSIO_SBAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary SBUS errors [", portid);
reported = 0;
if (afsr & SYSIO_SBAFSR_SLE) {
reported++;
printk("(Late PIO Error)");
}
if (afsr & SYSIO_SBAFSR_STO) {
reported++;
printk("(Time Out)");
}
if (afsr & SYSIO_SBAFSR_SBERR) {
reported++;
printk("(Error Ack)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* XXX check iommu/strbuf for further error status XXX */
return IRQ_HANDLED;
}
#define ECC_CONTROL 0x0020UL
#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
#define SYSIO_UE_INO 0x34
#define SYSIO_CE_INO 0x35
#define SYSIO_SBUSERR_INO 0x36
static void __init sysio_register_error_handlers(struct platform_device *op)
{
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned int irq;
u64 control;
int portid;
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
irq = sbus_build_irq(op, SYSIO_UE_INO);
if (request_irq(irq, sysio_ue_handler, 0,
"SYSIO_UE", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
portid);
prom_halt();
}
irq = sbus_build_irq(op, SYSIO_CE_INO);
if (request_irq(irq, sysio_ce_handler, 0,
"SYSIO_CE", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
portid);
prom_halt();
}
irq = sbus_build_irq(op, SYSIO_SBUSERR_INO);
if (request_irq(irq, sysio_sbus_error_handler, 0,
"SYSIO_SBERR", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
portid);
prom_halt();
}
/* Now turn the error interrupts on and also enable ECC checking. */
upa_writeq((SYSIO_ECNTRL_ECCEN |
SYSIO_ECNTRL_UEEN |
SYSIO_ECNTRL_CEEN),
reg_base + ECC_CONTROL);
control = upa_readq(iommu->write_complete_reg);
control |= 0x100UL; /* SBUS Error Interrupt Enable */
upa_writeq(control, iommu->write_complete_reg);
}
/* Boot time initialization. */
static void __init sbus_iommu_init(struct platform_device *op)
{
const struct linux_prom64_registers *pr;
struct device_node *dp = op->dev.of_node;
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long regs, reg_base;
int i, portid;
u64 control;
pr = of_get_property(dp, "reg", NULL);
if (!pr) {
prom_printf("sbus_iommu_init: Cannot map SYSIO "
"control registers.\n");
prom_halt();
}
regs = pr->phys_addr;
iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
if (!iommu || !strbuf)
goto fatal_memory_error;
op->dev.archdata.iommu = iommu;
op->dev.archdata.stc = strbuf;
op->dev.archdata.numa_node = NUMA_NO_NODE;
reg_base = regs + SYSIO_IOMMUREG_BASE;
iommu->iommu_control = reg_base + IOMMU_CONTROL;
iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
iommu->iommu_flush = reg_base + IOMMU_FLUSH;
iommu->iommu_tags = iommu->iommu_control +
(IOMMU_TAGDIAG - IOMMU_CONTROL);
reg_base = regs + SYSIO_STRBUFREG_BASE;
strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
strbuf->strbuf_enabled = 1;
strbuf->strbuf_flushflag = (volatile unsigned long *)
((((unsigned long)&strbuf->__flushflag_buf[0])
+ 63UL)
& ~63UL);
strbuf->strbuf_flushflag_pa = (unsigned long)
__pa(strbuf->strbuf_flushflag);
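/* The flush flag is forced onto its own 64-byte line because the
 * streaming buffer DMA-writes it to signal flush completion.
 */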
/* The SYSIO SBUS control register is used for dummy reads
* in order to ensure write completion.
*/
iommu->write_complete_reg = regs + 0x2000UL;
portid = of_getintprop_default(op->dev.of_node, "portid", -1);
printk(KERN_INFO "SYSIO: UPA portID %x, at %016lx\n",
portid, regs);
/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff, -1))
goto fatal_memory_error;
control = upa_readq(iommu->iommu_control);
control = ((7UL << 16UL) |
(0UL << 2UL) |
(1UL << 1UL) |
(1UL << 0UL));
upa_writeq(control, iommu->iommu_control);
/* Clean out any cruft in the IOMMU using
* diagnostic accesses.
*/
for (i = 0; i < 16; i++) {
unsigned long dram, tag;
dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
dram += (unsigned long)i * 8UL;
tag += (unsigned long)i * 8UL;
upa_writeq(0, dram);
upa_writeq(0, tag);
}
upa_readq(iommu->write_complete_reg);
/* Give the TSB to SYSIO. */
upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
/* Setup streaming buffer, DE=1 SB_EN=1 */
control = (1UL << 1UL) | (1UL << 0UL);
upa_writeq(control, strbuf->strbuf_control);
/* Clear out the tags using diagnostics. */
for (i = 0; i < 16; i++) {
unsigned long ptag, ltag;
ptag = strbuf->strbuf_control +
(STRBUF_PTAGDIAG - STRBUF_CONTROL);
ltag = strbuf->strbuf_control +
(STRBUF_LTAGDIAG - STRBUF_CONTROL);
ptag += (unsigned long)i * 8UL;
ltag += (unsigned long)i * 8UL;
upa_writeq(0UL, ptag);
upa_writeq(0UL, ltag);
}
/* Enable DVMA arbitration for all devices/slots. */
control = upa_readq(iommu->write_complete_reg);
control |= 0x3fUL;
upa_writeq(control, iommu->write_complete_reg);
/* Now some Xfire specific grot... */
if (this_is_starfire)
starfire_hookup(portid);
sysio_register_error_handlers(op);
return;
fatal_memory_error:
kfree(iommu);
kfree(strbuf);
prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
}
static int __init sbus_init(void)
{
struct device_node *dp;
for_each_node_by_name(dp, "sbus") {
struct platform_device *op = of_find_device_by_node(dp);
sbus_iommu_init(op);
of_propagate_archdata(op);
}
return 0;
}
subsys_initcall(sbus_init);
| linux-master | arch/sparc/kernel/sbus.c |
// SPDX-License-Identifier: GPL-2.0
/* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller ([email protected])
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower ([email protected])
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/auxio.h>
void __iomem *auxio_register = NULL;
EXPORT_SYMBOL(auxio_register);
enum auxio_type {
AUXIO_TYPE_NODEV,
AUXIO_TYPE_SBUS,
AUXIO_TYPE_EBUS
};
static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
static DEFINE_SPINLOCK(auxio_lock);
static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
{
if (auxio_register) {
unsigned long flags;
u8 regval, newval;
spin_lock_irqsave(&auxio_lock, flags);
regval = (ebus ?
(u8) readl(auxio_register) :
sbus_readb(auxio_register));
newval = regval | bits_on;
newval &= ~bits_off;
if (!ebus)
newval &= ~AUXIO_AUX1_MASK;
if (ebus)
writel((u32) newval, auxio_register);
else
sbus_writeb(newval, auxio_register);
spin_unlock_irqrestore(&auxio_lock, flags);
}
}
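/* EBUS AUXIO is a 32-bit register accessed via readl()/writel(),
 * while the SBUS flavor is a single byte; the ebus flag above and
 * below hides that difference.
 */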
static void __auxio_set_bit(u8 bit, int on, int ebus)
{
u8 bits_on = bit;
u8 bits_off = 0;
if (!on) {
u8 tmp = bits_off;
bits_off = bits_on;
bits_on = tmp;
}
__auxio_rmw(bits_on, bits_off, ebus);
}
void auxio_set_led(int on)
{
int ebus = auxio_devtype == AUXIO_TYPE_EBUS;
u8 bit;
bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
__auxio_set_bit(bit, on, ebus);
}
EXPORT_SYMBOL(auxio_set_led);
static void __auxio_sbus_set_lte(int on)
{
__auxio_set_bit(AUXIO_AUX1_LTE, on, 0);
}
void auxio_set_lte(int on)
{
switch(auxio_devtype) {
case AUXIO_TYPE_SBUS:
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
default:
break;
}
}
EXPORT_SYMBOL(auxio_set_lte);
static const struct of_device_id auxio_match[] = {
{
.name = "auxio",
},
{},
};
MODULE_DEVICE_TABLE(of, auxio_match);
static int auxio_probe(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
unsigned long size;
if (of_node_name_eq(dp->parent, "ebus")) {
auxio_devtype = AUXIO_TYPE_EBUS;
size = sizeof(u32);
} else if (of_node_name_eq(dp->parent, "sbus")) {
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
printk("auxio: Unknown parent bus type [%pOFn]\n",
dp->parent);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
if (!auxio_register)
return -ENODEV;
printk(KERN_INFO "AUXIO: Found device at %pOF\n", dp);
if (auxio_devtype == AUXIO_TYPE_EBUS)
auxio_set_led(AUXIO_LED_ON);
return 0;
}
static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
.of_match_table = auxio_match,
},
};
static int __init auxio_init(void)
{
return platform_driver_register(&auxio_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the floppy driver
* need to use the AUXIO register.
*/
fs_initcall(auxio_init);
| linux-master | arch/sparc/kernel/auxio_64.c |
// SPDX-License-Identifier: GPL-2.0
/* kgdb.c: KGDB support for 32-bit sparc.
*
* Copyright (C) 2008 David S. Miller <[email protected]>
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/cacheflush.h>
#include "kernel.h"
#include "entry.h"
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window32 *win;
int i;
gdb_regs[GDB_G0] = 0;
for (i = 0; i < 15; i++)
gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i];
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F31; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_Y] = regs->y;
gdb_regs[GDB_PSR] = regs->psr;
gdb_regs[GDB_WIM] = 0;
gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
gdb_regs[GDB_PC] = regs->pc;
gdb_regs[GDB_NPC] = regs->npc;
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_CSR] = 0;
}
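/* Note: the locals and ins above come from the register window
 * saved at %fp (UREG_FP), so they are read from the stack rather
 * than from pt_regs itself.
 */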
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct thread_info *t = task_thread_info(p);
struct reg_window32 *win;
int i;
for (i = GDB_G0; i < GDB_G6; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_G6] = (unsigned long) t;
gdb_regs[GDB_G7] = 0;
for (i = GDB_O0; i < GDB_SP; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_SP] = t->ksp;
gdb_regs[GDB_O7] = 0;
win = (struct reg_window32 *) t->ksp;
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F31; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_Y] = 0;
gdb_regs[GDB_PSR] = t->kpsr;
gdb_regs[GDB_WIM] = t->kwim;
gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
gdb_regs[GDB_PC] = t->kpc;
gdb_regs[GDB_NPC] = t->kpc + 4;
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_CSR] = 0;
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window32 *win;
int i;
for (i = 0; i < 15; i++)
regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i];
/* If the PSR register is changing, we have to preserve
* the CWP field, otherwise window save/restore explodes.
*/
if (regs->psr != gdb_regs[GDB_PSR]) {
unsigned long cwp = regs->psr & PSR_CWP;
regs->psr = (gdb_regs[GDB_PSR] & ~PSR_CWP) | cwp;
}
regs->pc = gdb_regs[GDB_PC];
regs->npc = gdb_regs[GDB_NPC];
regs->y = gdb_regs[GDB_Y];
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
for (i = 0; i < 8; i++)
win->locals[i] = gdb_regs[GDB_L0 + i];
for (i = 0; i < 8; i++)
win->ins[i] = gdb_regs[GDB_I0 + i];
}
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *linux_regs)
{
unsigned long addr;
char *ptr;
switch (remcomInBuffer[0]) {
case 'c':
/* try to read optional parameter, pc unchanged if no parm */
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr)) {
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
fallthrough;
case 'D':
case 'k':
if (linux_regs->pc == (unsigned long) arch_kgdb_breakpoint) {
linux_regs->pc = linux_regs->npc;
linux_regs->npc += 4;
}
return 0;
}
return -1;
}
asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
{
unsigned long flags;
if (user_mode(regs)) {
do_hw_interrupt(regs, trap_level);
return;
}
flushw_all();
local_irq_save(flags);
kgdb_handle_exception(trap_level, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
int kgdb_arch_init(void)
{
return 0;
}
void kgdb_arch_exit(void)
{
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->pc = ip;
regs->npc = regs->pc + 4;
}
const struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x7d */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
};
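/* { 0x91, 0xd0, 0x20, 0x7d } is the big-endian encoding of
 * "ta 0x7d", the software trap that the sparc32 trap table wires
 * to kgdb_trap() above.
 */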
| linux-master | arch/sparc/kernel/kgdb_32.c |
// SPDX-License-Identifier: GPL-2.0
/* pmc - Driver implementation for power management functions
* of Power Management Controller (PMC) on SPARCstation-Voyager.
*
* Copyright (c) 2002 Eric Brower ([email protected])
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/auxio.h>
#include <asm/processor.h>
/* Debug
*
* #define PMC_DEBUG_LED
* #define PMC_NO_IDLE
*/
#define PMC_OBPNAME "SUNW,pmc"
#define PMC_DEVNAME "pmc"
#define PMC_IDLE_REG 0x00
#define PMC_IDLE_ON 0x01
static u8 __iomem *regs;
#define pmc_readb(offs) (sbus_readb(regs+offs))
#define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs))
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
static void pmc_swift_idle(void)
{
#ifdef PMC_DEBUG_LED
set_auxio(0x00, AUXIO_LED);
#endif
pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG);
#ifdef PMC_DEBUG_LED
set_auxio(AUXIO_LED, 0x00);
#endif
}
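/* Writing PMC_IDLE_ON asks the PMC to put the CPU into its
 * low-power idle state; execution resumes at the next interrupt.
 */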
static int pmc_probe(struct platform_device *op)
{
regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]), PMC_OBPNAME);
if (!regs) {
printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME);
return -ENODEV;
}
#ifndef PMC_NO_IDLE
/* Assign power management IDLE handler */
sparc_idle = pmc_swift_idle;
#endif
printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
return 0;
}
static const struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
{},
};
MODULE_DEVICE_TABLE(of, pmc_match);
static struct platform_driver pmc_driver = {
.driver = {
.name = "pmc",
.of_match_table = pmc_match,
},
.probe = pmc_probe,
};
static int __init pmc_init(void)
{
return platform_driver_register(&pmc_driver);
}
/* This driver is not critical to the boot process
* and is easiest to ioremap when SBus is already
* initialized, so we install ourselves thusly:
*/
__initcall(pmc_init);
| linux-master | arch/sparc/kernel/pmc.c |
// SPDX-License-Identifier: GPL-2.0
/* psycho_common.c: Code common to PSYCHO and derivative PCI controllers.
*
* Copyright (C) 2008 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/numa.h>
#include <linux/platform_device.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "psycho_common.h"
#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002ULL
#define PSYCHO_STCERR_WRITE 0x0000000000000002ULL
#define PSYCHO_STCERR_READ 0x0000000000000001ULL
#define PSYCHO_STCTAG_PPN 0x0fffffff00000000ULL
#define PSYCHO_STCTAG_VPN 0x00000000ffffe000ULL
#define PSYCHO_STCTAG_VALID 0x0000000000000002ULL
#define PSYCHO_STCTAG_WRITE 0x0000000000000001ULL
#define PSYCHO_STCLINE_LINDX 0x0000000001e00000ULL
#define PSYCHO_STCLINE_SPTR 0x00000000001f8000ULL
#define PSYCHO_STCLINE_LADDR 0x0000000000007f00ULL
#define PSYCHO_STCLINE_EPTR 0x00000000000000fcULL
#define PSYCHO_STCLINE_VALID 0x0000000000000002ULL
#define PSYCHO_STCLINE_FOFN 0x0000000000000001ULL
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
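/* These snapshot buffers are shared by all PBMs, so stc_buf_lock
 * serializes dumps triggered by concurrent error interrupts.
 */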
static void psycho_check_stc_error(struct pci_pbm_info *pbm)
{
unsigned long err_base, tag_base, line_base;
struct strbuf *strbuf = &pbm->stc;
u64 control;
int i;
if (!strbuf->strbuf_control)
return;
err_base = strbuf->strbuf_err_stat;
tag_base = strbuf->strbuf_tag_diag;
line_base = strbuf->strbuf_line_diag;
spin_lock(&stc_buf_lock);
/* This is __REALLY__ dangerous. When we put the streaming
* buffer into diagnostic mode to probe its tags and error
* status, we _must_ clear all of the line tag valid bits
* before re-enabling the streaming buffer. If any dirty data
* lives in the STC when we do this, we will end up
* invalidating it before it has a chance to reach main
* memory.
*/
control = upa_readq(strbuf->strbuf_control);
upa_writeq(control | PSYCHO_STRBUF_CTRL_DENAB, strbuf->strbuf_control);
for (i = 0; i < 128; i++) {
u64 val;
val = upa_readq(err_base + (i * 8UL));
upa_writeq(0UL, err_base + (i * 8UL));
stc_error_buf[i] = val;
}
for (i = 0; i < 16; i++) {
stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
upa_writeq(0UL, tag_base + (i * 8UL));
upa_writeq(0UL, line_base + (i * 8UL));
}
/* OK, state is logged, exit diagnostic mode. */
upa_writeq(control, strbuf->strbuf_control);
for (i = 0; i < 16; i++) {
int j, saw_error, first, last;
saw_error = 0;
first = i * 8;
last = first + 8;
for (j = first; j < last; j++) {
u64 errval = stc_error_buf[j];
if (errval != 0) {
saw_error++;
printk(KERN_ERR "%s: STC_ERR(%d)[wr(%d)"
"rd(%d)]\n",
pbm->name,
j,
(errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
(errval & PSYCHO_STCERR_READ) ? 1 : 0);
}
}
if (saw_error != 0) {
u64 tagval = stc_tag_buf[i];
u64 lineval = stc_line_buf[i];
printk(KERN_ERR "%s: STC_TAG(%d)[PA(%016llx)VA(%08llx)"
"V(%d)W(%d)]\n",
pbm->name,
i,
((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
(tagval & PSYCHO_STCTAG_VPN),
((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
printk(KERN_ERR "%s: STC_LINE(%d)[LIDX(%llx)SP(%llx)"
"LADDR(%llx)EP(%llx)V(%d)FOFN(%d)]\n",
pbm->name,
i,
((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
}
}
spin_unlock(&stc_buf_lock);
}
#define PSYCHO_IOMMU_TAG 0xa580UL
#define PSYCHO_IOMMU_DATA 0xa600UL
static void psycho_record_iommu_tags_and_data(struct pci_pbm_info *pbm,
u64 *tag, u64 *data)
{
int i;
for (i = 0; i < 16; i++) {
unsigned long base = pbm->controller_regs;
unsigned long off = i * 8UL;
tag[i] = upa_readq(base + PSYCHO_IOMMU_TAG+off);
data[i] = upa_readq(base + PSYCHO_IOMMU_DATA+off);
/* Now clear out the entry. */
upa_writeq(0, base + PSYCHO_IOMMU_TAG + off);
upa_writeq(0, base + PSYCHO_IOMMU_DATA + off);
}
}
#define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
#define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL)
#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffULL
#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffULL
static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm,
u64 *tag, u64 *data)
{
int i;
for (i = 0; i < 16; i++) {
u64 tag_val, data_val;
const char *type_str;
tag_val = tag[i];
if (!(tag_val & PSYCHO_IOMMU_TAG_ERR))
continue;
data_val = data[i];
switch((tag_val & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) {
case 0:
type_str = "Protection Error";
break;
case 1:
type_str = "Invalid Error";
break;
case 2:
type_str = "TimeOut Error";
break;
case 3:
default:
type_str = "ECC Error";
break;
}
printk(KERN_ERR "%s: IOMMU TAG(%d)[error(%s) wr(%d) "
"str(%d) sz(%dK) vpg(%08llx)]\n",
pbm->name, i, type_str,
((tag_val & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
((tag_val & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
((tag_val & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
(tag_val & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
printk(KERN_ERR "%s: IOMMU DATA(%d)[valid(%d) cache(%d) "
"ppg(%016llx)]\n",
pbm->name, i,
((data_val & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
((data_val & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
(data_val & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
}
}
#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL
#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL
void psycho_check_iommu_error(struct pci_pbm_info *pbm,
unsigned long afsr,
unsigned long afar,
enum psycho_error_type type)
{
u64 control, iommu_tag[16], iommu_data[16];
struct iommu *iommu = pbm->iommu;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
control = upa_readq(iommu->iommu_control);
if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
const char *type_str;
control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
upa_writeq(control, iommu->iommu_control);
switch ((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
case 0:
type_str = "Protection Error";
break;
case 1:
type_str = "Invalid Error";
break;
case 2:
type_str = "TimeOut Error";
break;
case 3:
default:
type_str = "ECC Error";
break;
}
printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
pbm->name, type_str);
/* It is very possible for another DVMA to occur while
* we do this probe, and corrupt the system further.
* But we are so screwed at this point that we are
* likely to crash hard anyways, so get as much
* diagnostic information to the console as we can.
*/
psycho_record_iommu_tags_and_data(pbm, iommu_tag, iommu_data);
psycho_dump_iommu_tags_and_data(pbm, iommu_tag, iommu_data);
}
psycho_check_stc_error(pbm);
spin_unlock_irqrestore(&iommu->lock, flags);
}
#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL
#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL
static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm)
{
irqreturn_t ret = IRQ_NONE;
u64 csr, csr_error_bits;
u16 stat, *addr;
csr = upa_readq(pbm->pci_csr);
csr_error_bits = csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
if (csr_error_bits) {
/* Clear the errors. */
upa_writeq(csr, pbm->pci_csr);
/* Log 'em. */
if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
printk(KERN_ERR "%s: PCI streaming byte hole "
"error asserted.\n", pbm->name);
if (csr_error_bits & PSYCHO_PCICTRL_SERR)
printk(KERN_ERR "%s: PCI SERR signal asserted.\n",
pbm->name);
ret = IRQ_HANDLED;
}
addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_STATUS);
pci_config_read16(addr, &stat);
if (stat & (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_SIG_SYSTEM_ERROR)) {
printk(KERN_ERR "%s: PCI bus error, PCI_STATUS[%04x]\n",
pbm->name, stat);
pci_config_write16(addr, 0xffff);
ret = IRQ_HANDLED;
}
return ret;
}
#define PSYCHO_PCIAFSR_PMA 0x8000000000000000ULL
#define PSYCHO_PCIAFSR_PTA 0x4000000000000000ULL
#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000ULL
#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000ULL
#define PSYCHO_PCIAFSR_SMA 0x0800000000000000ULL
#define PSYCHO_PCIAFSR_STA 0x0400000000000000ULL
#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000ULL
#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000ULL
#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000ULL
#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000ULL
#define PSYCHO_PCIAFSR_BLK 0x0000000080000000ULL
#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000ULL
#define PSYCHO_PCIAFSR_MID 0x000000003e000000ULL
#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffULL
irqreturn_t psycho_pcierr_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
u64 afsr, afar, error_bits;
int reported;
afsr = upa_readq(pbm->pci_afsr);
afar = upa_readq(pbm->pci_afar);
error_bits = afsr &
(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
if (!error_bits)
return psycho_pcierr_intr_other(pbm);
upa_writeq(error_bits, pbm->pci_afsr);
printk(KERN_ERR "%s: PCI Error, primary error type[%s]\n",
pbm->name,
(((error_bits & PSYCHO_PCIAFSR_PMA) ?
"Master Abort" :
((error_bits & PSYCHO_PCIAFSR_PTA) ?
"Target Abort" :
((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
"Excessive Retries" :
((error_bits & PSYCHO_PCIAFSR_PPERR) ?
"Parity Error" : "???"))))));
printk(KERN_ERR "%s: bytemask[%04llx] UPA_MID[%02llx] was_block(%d)\n",
pbm->name,
(afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
(afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
(afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
printk(KERN_ERR "%s: PCI AFAR [%016llx]\n", pbm->name, afar);
printk(KERN_ERR "%s: PCI Secondary errors [", pbm->name);
reported = 0;
if (afsr & PSYCHO_PCIAFSR_SMA) {
reported++;
printk("(Master Abort)");
}
if (afsr & PSYCHO_PCIAFSR_STA) {
reported++;
printk("(Target Abort)");
}
if (afsr & PSYCHO_PCIAFSR_SRTRY) {
reported++;
printk("(Excessive Retries)");
}
if (afsr & PSYCHO_PCIAFSR_SPERR) {
reported++;
printk("(Parity Error)");
}
if (!reported)
printk("(none)");
printk("]\n");
if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
psycho_check_iommu_error(pbm, afsr, afar, PCI_ERR);
pci_scan_for_target_abort(pbm, pbm->pci_bus);
}
if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
pci_scan_for_master_abort(pbm, pbm->pci_bus);
if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
pci_scan_for_parity_error(pbm, pbm->pci_bus);
return IRQ_HANDLED;
}
static void psycho_iommu_flush(struct pci_pbm_info *pbm)
{
int i;
for (i = 0; i < 16; i++) {
unsigned long off = i * 8;
upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_TAG + off);
upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_DATA + off);
}
}
#define PSYCHO_IOMMU_CONTROL 0x0200UL
#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL
#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL
#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL
#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL
#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL
#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL
#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL
#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL
#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL
#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL
#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL
#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL
#define PSYCHO_IOMMU_FLUSH 0x0210UL
#define PSYCHO_IOMMU_TSBBASE 0x0208UL
int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
u32 dvma_offset, u32 dma_mask,
unsigned long write_complete_offset)
{
struct iommu *iommu = pbm->iommu;
u64 control;
int err;
iommu->iommu_control = pbm->controller_regs + PSYCHO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->controller_regs + PSYCHO_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->controller_regs + PSYCHO_IOMMU_FLUSH;
iommu->iommu_tags = pbm->controller_regs + PSYCHO_IOMMU_TAG;
iommu->write_complete_reg = (pbm->controller_regs +
write_complete_offset);
iommu->iommu_ctxflush = 0;
control = upa_readq(iommu->iommu_control);
control |= PSYCHO_IOMMU_CTRL_DENAB;
upa_writeq(control, iommu->iommu_control);
psycho_iommu_flush(pbm);
/* Leave diag mode enabled for full-flushing done in pci_iommu.c */
err = iommu_table_init(iommu, tsbsize * 1024 * 8,
dvma_offset, dma_mask, pbm->numa_node);
if (err)
return err;
upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
control = upa_readq(iommu->iommu_control);
control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
control |= PSYCHO_IOMMU_CTRL_ENAB;
switch (tsbsize) {
case 64:
control |= PSYCHO_IOMMU_TSBSZ_64K;
break;
case 128:
control |= PSYCHO_IOMMU_TSBSZ_128K;
break;
default:
return -EINVAL;
}
upa_writeq(control, iommu->iommu_control);
return 0;
}
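/* Caller sketch (illustrative values): a PSYCHO controller driver
 * would hand over its TSB size and DVMA window, e.g.
 *
 *	err = psycho_iommu_init(pbm, 128, 0xc0000000, 0xffffffff,
 *				PSYCHO_CONTROL_OFFSET);
 *
 * where PSYCHO_CONTROL_OFFSET names that chip's write-completion
 * register offset (identifier here is hypothetical).
 */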
void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct platform_device *op,
const char *chip_name, int chip_type)
{
struct device_node *dp = op->dev.of_node;
pbm->name = dp->full_name;
pbm->numa_node = NUMA_NO_NODE;
pbm->chip_type = chip_type;
pbm->chip_version = of_getintprop_default(dp, "version#", 0);
pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
pbm->op = op;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 8;
pbm->index = pci_num_pbms++;
pci_get_pbm_props(pbm);
pci_determine_mem_io_space(pbm);
printk(KERN_INFO "%s: %s PCI Bus Module ver[%x:%x]\n",
pbm->name, chip_name,
pbm->chip_version, pbm->chip_revision);
}
| linux-master | arch/sparc/kernel/psycho_common.c |
// SPDX-License-Identifier: GPL-2.0
/* apc - Driver implementation for power management functions
* of Aurora Personality Chip (APC) on SPARCstation-4/5 and
* derivatives.
*
* Copyright (c) 2002 Eric Brower ([email protected])
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/auxio.h>
#include <asm/apc.h>
#include <asm/processor.h>
/* Debugging
*
* #define APC_DEBUG_LED
*/
#define APC_MINOR MISC_DYNAMIC_MINOR
#define APC_OBPNAME "power-management"
#define APC_DEVNAME "apc"
static u8 __iomem *regs;
static int apc_no_idle = 0;
#define apc_readb(offs) (sbus_readb(regs+offs))
#define apc_writeb(val, offs) (sbus_writeb(val, regs+offs))
/* Specify "apc=noidle" on the kernel command line to
* disable APC CPU standby support. Certain prototype
* systems (SPARCstation-Fox) do not play well with APC
* CPU idle, so disable this if your system has APC and
* crashes randomly.
*/
static int __init apc_setup(char *str)
{
if(!strncmp(str, "noidle", strlen("noidle"))) {
apc_no_idle = 1;
return 1;
}
return 0;
}
__setup("apc=", apc_setup);
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
static void apc_swift_idle(void)
{
#ifdef APC_DEBUG_LED
set_auxio(0x00, AUXIO_LED);
#endif
apc_writeb(apc_readb(APC_IDLE_REG) | APC_IDLE_ON, APC_IDLE_REG);
#ifdef APC_DEBUG_LED
set_auxio(AUXIO_LED, 0x00);
#endif
}
static inline void apc_free(struct platform_device *op)
{
of_iounmap(&op->resource[0], regs, resource_size(&op->resource[0]));
}
static int apc_open(struct inode *inode, struct file *f)
{
return 0;
}
static int apc_release(struct inode *inode, struct file *f)
{
return 0;
}
static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
{
__u8 inarg, __user *arg = (__u8 __user *) __arg;
switch (cmd) {
case APCIOCGFANCTL:
if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg))
return -EFAULT;
break;
case APCIOCGCPWR:
if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg))
return -EFAULT;
break;
case APCIOCGBPORT:
if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg))
return -EFAULT;
break;
case APCIOCSFANCTL:
if (get_user(inarg, arg))
return -EFAULT;
apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG);
break;
case APCIOCSCPWR:
if (get_user(inarg, arg))
return -EFAULT;
apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG);
break;
case APCIOCSBPORT:
if (get_user(inarg, arg))
return -EFAULT;
apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG);
break;
default:
return -EINVAL;
}
return 0;
}
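/* Userspace sketch (hypothetical snippet; ioctl numbers come from
 * asm/apc.h):
 *
 *	int fd = open("/dev/apc", O_RDWR);
 *	__u8 fan;
 *
 *	ioctl(fd, APCIOCGFANCTL, &fan);		read fan control
 *	ioctl(fd, APCIOCSFANCTL, &fan);		write it back
 */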
static const struct file_operations apc_fops = {
.unlocked_ioctl = apc_ioctl,
.open = apc_open,
.release = apc_release,
.llseek = noop_llseek,
};
static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
static int apc_probe(struct platform_device *op)
{
int err;
regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]), APC_OBPNAME);
if (!regs) {
printk(KERN_ERR "%s: unable to map registers\n", APC_DEVNAME);
return -ENODEV;
}
err = misc_register(&apc_miscdev);
if (err) {
printk(KERN_ERR "%s: unable to register device\n", APC_DEVNAME);
apc_free(op);
return -ENODEV;
}
/* Assign power management IDLE handler */
if (!apc_no_idle)
sparc_idle = apc_swift_idle;
printk(KERN_INFO "%s: power management initialized%s\n",
APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
return 0;
}
static const struct of_device_id apc_match[] = {
{
.name = APC_OBPNAME,
},
{},
};
MODULE_DEVICE_TABLE(of, apc_match);
static struct platform_driver apc_driver = {
.driver = {
.name = "apc",
.of_match_table = apc_match,
},
.probe = apc_probe,
};
static int __init apc_init(void)
{
return platform_driver_register(&apc_driver);
}
/* This driver is not critical to the boot process
* and is easiest to ioremap when SBus is already
* initialized, so we install ourselves thusly:
*/
__initcall(apc_init);
| linux-master | arch/sparc/kernel/apc.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include "of_device_common.h"
unsigned int irq_of_parse_and_map(struct device_node *node, int index)
{
struct platform_device *op = of_find_device_by_node(node);
if (!op || index >= op->archdata.num_irqs)
return 0;
return op->archdata.irqs[index];
}
EXPORT_SYMBOL(irq_of_parse_and_map);
int of_address_to_resource(struct device_node *node, int index,
struct resource *r)
{
struct platform_device *op = of_find_device_by_node(node);
if (!op || index >= op->num_resources)
return -EINVAL;
memcpy(r, &op->archdata.resource[index], sizeof(*r));
return 0;
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
void __iomem *of_iomap(struct device_node *node, int index)
{
struct platform_device *op = of_find_device_by_node(node);
struct resource *r;
if (!op || index >= op->num_resources)
return NULL;
r = &op->archdata.resource[index];
return of_ioremap(r, 0, resource_size(r), (char *) r->name);
}
EXPORT_SYMBOL(of_iomap);
/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
* BUS and propagate to all child platform_device objects.
*/
void of_propagate_archdata(struct platform_device *bus)
{
struct dev_archdata *bus_sd = &bus->dev.archdata;
struct device_node *bus_dp = bus->dev.of_node;
struct device_node *dp;
for (dp = bus_dp->child; dp; dp = dp->sibling) {
struct platform_device *op = of_find_device_by_node(dp);
op->dev.archdata.iommu = bus_sd->iommu;
op->dev.archdata.stc = bus_sd->stc;
op->dev.archdata.host_controller = bus_sd->host_controller;
op->dev.archdata.numa_node = bus_sd->numa_node;
op->dev.dma_ops = bus->dev.dma_ops;
if (dp->child)
of_propagate_archdata(op);
}
}
static void get_cells(struct device_node *dp, int *addrc, int *sizec)
{
if (addrc)
*addrc = of_n_addr_cells(dp);
if (sizec)
*sizec = of_n_size_cells(dp);
}
/*
* Default translator (generic bus)
*/
void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
{
get_cells(dev, addrc, sizec);
}
/* Make sure the least significant 64-bits are in-range. Even
* for 3 or 4 cell values it is a good enough approximation.
*/
int of_out_of_range(const u32 *addr, const u32 *base,
const u32 *size, int na, int ns)
{
u64 a = of_read_addr(addr, na);
u64 b = of_read_addr(base, na);
if (a < b)
return 1;
b += of_read_addr(size, ns);
if (a >= b)
return 1;
return 0;
}
int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
{
u32 result[OF_MAX_ADDR_CELLS];
int i;
if (ns > 2) {
printk("of_device: Cannot handle size cells (%d) > 2.", ns);
return -EINVAL;
}
if (of_out_of_range(addr, range, range + na + pna, na, ns))
return -EINVAL;
/* Start with the parent range base. */
memcpy(result, range + na, pna * 4);
/* Add in the child address offset. */
for (i = 0; i < na; i++)
result[pna - 1 - i] +=
(addr[na - 1 - i] -
range[na - 1 - i]);
memcpy(addr, result, pna * 4);
return 0;
}
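/* Worked example (hypothetical cells, na = ns = pna = 1): given a
 * range of <0x1000 0x80000000 0x100>, child address 0x1040 maps to
 * parent address 0x80000040: the parent base plus the child's
 * offset from the child base.
 */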
unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
{
if (flags)
return flags;
return IORESOURCE_MEM;
}
/*
* SBUS bus specific translator
*/
int of_bus_sbus_match(struct device_node *np)
{
struct device_node *dp = np;
while (dp) {
if (of_node_name_eq(dp, "sbus") ||
of_node_name_eq(dp, "sbi"))
return 1;
/* Have a look at use_1to1_mapping(). We're trying
* to match SBUS if that's the top-level bus and we
* don't have some intervening real bus that provides
* ranges based translations.
*/
if (of_property_present(dp, "ranges"))
break;
dp = dp->parent;
}
return 0;
}
void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
{
if (addrc)
*addrc = 2;
if (sizec)
*sizec = 1;
}
| linux-master | arch/sparc/kernel/of_device_common.c |
// SPDX-License-Identifier: GPL-2.0
/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
* Copyright (C) 1997, 2007 David S. Miller ([email protected])
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/dnotify.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/mmu_context.h>
#include <asm/compat_signal.h>
#include "systbls.h"
COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, path, u32, high, u32, low)
{
return ksys_truncate(path, ((u64)high << 32) | low);
}
COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd, u32, high, u32, low)
{
return ksys_ftruncate(fd, ((u64)high << 32) | low);
}
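/* All of the (high, low) pairs in this file combine the same way.
 * For example, high = 0x1 and low = 0x2 gives ((u64)0x1 << 32) | 0x2,
 * i.e. the 64-bit offset 0x100000002.
 */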
static int cp_compat_stat64(struct kstat *stat,
struct compat_stat64 __user *statbuf)
{
int err;
err = put_user(huge_encode_dev(stat->dev), &statbuf->st_dev);
err |= put_user(stat->ino, &statbuf->st_ino);
err |= put_user(stat->mode, &statbuf->st_mode);
err |= put_user(stat->nlink, &statbuf->st_nlink);
err |= put_user(from_kuid_munged(current_user_ns(), stat->uid), &statbuf->st_uid);
err |= put_user(from_kgid_munged(current_user_ns(), stat->gid), &statbuf->st_gid);
err |= put_user(huge_encode_dev(stat->rdev), &statbuf->st_rdev);
err |= put_user(0, (unsigned long __user *) &statbuf->__pad3[0]);
err |= put_user(stat->size, &statbuf->st_size);
err |= put_user(stat->blksize, &statbuf->st_blksize);
err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[0]);
err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[4]);
err |= put_user(stat->blocks, &statbuf->st_blocks);
err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
err |= put_user(0, &statbuf->__unused4);
err |= put_user(0, &statbuf->__unused5);
return err;
}
COMPAT_SYSCALL_DEFINE2(stat64, const char __user *, filename,
struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
COMPAT_SYSCALL_DEFINE2(lstat64, const char __user *, filename,
struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
COMPAT_SYSCALL_DEFINE2(fstat64, unsigned int, fd,
struct compat_stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
COMPAT_SYSCALL_DEFINE4(fstatat64, unsigned int, dfd,
const char __user *, filename,
struct compat_stat64 __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_compat_stat64(&stat, statbuf);
}
COMPAT_SYSCALL_DEFINE3(sparc_sigaction, int, sig,
struct compat_old_sigaction __user *,act,
struct compat_old_sigaction __user *,oact)
{
WARN_ON_ONCE(sig >= 0);
return compat_sys_sigaction(-sig, act, oact);
}
COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
struct compat_sigaction __user *,act,
struct compat_sigaction __user *,oact,
void __user *,restorer,
compat_size_t,sigsetsize)
{
struct k_sigaction new_ka, old_ka;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (act) {
u32 u_handler, u_restorer;
new_ka.ka_restorer = restorer;
ret = get_user(u_handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(u_handler);
ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
ret |= get_user(u_restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(u_restorer);
if (ret)
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
sizeof(oact->sa_mask));
ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
if (ret)
ret = -EFAULT;
}
return ret;
}
COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, ubuf,
compat_size_t, count, u32, poshi, u32, poslo)
{
return ksys_pread64(fd, ubuf, count, ((u64)poshi << 32) | poslo);
}
COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, char __user *, ubuf,
compat_size_t, count, u32, poshi, u32, poslo)
{
return ksys_pwrite64(fd, ubuf, count, ((u64)poshi << 32) | poslo);
}
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, u32, offhi, u32, offlo,
compat_size_t, count)
{
return ksys_readahead(fd, ((u64)offhi << 32) | offlo, count);
}
COMPAT_SYSCALL_DEFINE5(fadvise64, int, fd, u32, offhi, u32, offlo,
compat_size_t, len, int, advice)
{
return ksys_fadvise64_64(fd, ((u64)offhi << 32) | offlo, len, advice);
}
COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, u32, offhi, u32, offlo,
u32, lenhi, u32, lenlo, int, advice)
{
return ksys_fadvise64_64(fd,
((u64)offhi << 32) | offlo,
((u64)lenhi << 32) | lenlo,
advice);
}
COMPAT_SYSCALL_DEFINE6(sync_file_range, unsigned int, fd, u32, off_high, u32, off_low,
u32, nb_high, u32, nb_low, unsigned int, flags)
{
return ksys_sync_file_range(fd,
((u64)off_high << 32) | off_low,
((u64)nb_high << 32) | nb_low,
flags);
}
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, u32, offhi, u32, offlo,
u32, lenhi, u32, lenlo)
{
return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
((loff_t)lenhi << 32) | lenlo);
}
| linux-master | arch/sparc/kernel/sys_sparc32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This file handles the architecture independent parts of process handling..
*/
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include "kernel.h"
asmlinkage long sparc_fork(struct pt_regs *regs)
{
unsigned long orig_i1 = regs->u_regs[UREG_I1];
long ret;
struct kernel_clone_args args = {
.exit_signal = SIGCHLD,
/* Reuse the parent's stack for the child. */
.stack = regs->u_regs[UREG_FP],
};
ret = kernel_clone(&args);
/* If we get an error and potentially restart the system
* call, we're screwed because copy_thread() clobbered
* the parent's %o1. So detect that case and restore it
* here.
*/
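	/* Illustration of the unsigned comparison (value from the generic
	 * errno headers): with ERESTART_RESTARTBLOCK = 516, any return in
	 * [-516, -1] becomes, after the cast, a value within 516 of
	 * ULONG_MAX, so only the error/restart cases match here.
	 */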
if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
regs->u_regs[UREG_I1] = orig_i1;
return ret;
}
asmlinkage long sparc_vfork(struct pt_regs *regs)
{
unsigned long orig_i1 = regs->u_regs[UREG_I1];
long ret;
struct kernel_clone_args args = {
.flags = CLONE_VFORK | CLONE_VM,
.exit_signal = SIGCHLD,
/* Reuse the parent's stack for the child. */
.stack = regs->u_regs[UREG_FP],
};
ret = kernel_clone(&args);
/* If we get an error and potentially restart the system
* call, we're screwed because copy_thread() clobbered
* the parent's %o1. So detect that case and restore it
* here.
*/
if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
regs->u_regs[UREG_I1] = orig_i1;
return ret;
}
asmlinkage long sparc_clone(struct pt_regs *regs)
{
unsigned long orig_i1 = regs->u_regs[UREG_I1];
unsigned int flags = lower_32_bits(regs->u_regs[UREG_I0]);
long ret;
struct kernel_clone_args args = {
.flags = (flags & ~CSIGNAL),
.exit_signal = (flags & CSIGNAL),
.tls = regs->u_regs[UREG_I3],
};
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
args.pidfd = compat_ptr(regs->u_regs[UREG_I2]);
args.child_tid = compat_ptr(regs->u_regs[UREG_I4]);
args.parent_tid = compat_ptr(regs->u_regs[UREG_I2]);
} else
#endif
{
args.pidfd = (int __user *)regs->u_regs[UREG_I2];
args.child_tid = (int __user *)regs->u_regs[UREG_I4];
args.parent_tid = (int __user *)regs->u_regs[UREG_I2];
}
/* Did userspace set up a separate stack for the child or are we
* reusing the parent's?
*/
if (regs->u_regs[UREG_I1])
args.stack = regs->u_regs[UREG_I1];
else
args.stack = regs->u_regs[UREG_FP];
ret = kernel_clone(&args);
/* If we get an error and potentially restart the system
* call, we're screwed because copy_thread() clobbered
* the parent's %o1. So detect that case and restore it
* here.
*/
if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
regs->u_regs[UREG_I1] = orig_i1;
return ret;
}
| linux-master | arch/sparc/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0-only
/* pcr.c: Generic sparc64 performance counter infrastructure.
*
* Copyright (C) 2009 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/ftrace.h>
#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
/* This code is shared between various users of the performance
* counters. Users will be oprofile, pseudo-NMI watchdog, and the
* perf_event support layer.
*/
/* Performance counter interrupts run unmasked at PIL level 15.
* Therefore we can't do things like wakeups and other work
* that expects IRQ disabling to be adhered to in locking etc.
*
* Therefore in such situations we defer the work by signalling
* a lower level cpu IRQ.
*/
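/* The intended flow, for illustration: a PIL-15 profiling interrupt
 * queues work with irq_work_queue(), which reaches
 * arch_irq_work_raise() below; that sets the PIL_DEFERRED_PCR_WORK
 * softint, and once the PIL drops, deferred_pcr_work_irq() runs the
 * queued work in ordinary IRQ context.
 */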
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs;
clear_softint(1 << PIL_DEFERRED_PCR_WORK);
old_regs = set_irq_regs(regs);
irq_enter();
#ifdef CONFIG_IRQ_WORK
irq_work_run();
#endif
irq_exit();
set_irq_regs(old_regs);
}
void arch_irq_work_raise(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
static u64 direct_pcr_read(unsigned long reg_num)
{
u64 val;
WARN_ON_ONCE(reg_num != 0);
__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
return val;
}
static void direct_pcr_write(unsigned long reg_num, u64 val)
{
WARN_ON_ONCE(reg_num != 0);
__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}
static u64 direct_pic_read(unsigned long reg_num)
{
u64 val;
WARN_ON_ONCE(reg_num != 0);
__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
return val;
}
static void direct_pic_write(unsigned long reg_num, u64 val)
{
WARN_ON_ONCE(reg_num != 0);
/* Blackbird errata workaround. See commentary in
* arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
* for more information.
*/
__asm__ __volatile__("ba,pt %%xcc, 99f\n\t"
" nop\n\t"
".align 64\n"
"99:wr %0, 0x0, %%pic\n\t"
"rd %%pic, %%g0" : : "r" (val));
}
static u64 direct_picl_value(unsigned int nmi_hz)
{
u32 delta = local_cpu_data().clock_tick / nmi_hz;
return ((u64)((0 - delta) & 0xffffffff)) << 32;
}
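/* Worked example (illustrative numbers only): with a 1 GHz clock_tick
 * and nmi_hz = 100, delta = 10000000.  The upper 32-bit counter is
 * preloaded with 2^32 - delta, so it overflows after delta increments,
 * i.e. roughly every 10 ms at this clock rate.
 */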
static const struct pcr_ops direct_pcr_ops = {
.read_pcr = direct_pcr_read,
.write_pcr = direct_pcr_write,
.read_pic = direct_pic_read,
.write_pic = direct_pic_write,
.nmi_picl_value = direct_picl_value,
.pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
.pcr_nmi_disable = PCR_PIC_PRIV,
};
static void n2_pcr_write(unsigned long reg_num, u64 val)
{
unsigned long ret;
WARN_ON_ONCE(reg_num != 0);
if (val & PCR_N2_HTRACE) {
ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
if (ret != HV_EOK)
direct_pcr_write(reg_num, val);
} else
direct_pcr_write(reg_num, val);
}
static u64 n2_picl_value(unsigned int nmi_hz)
{
u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);
return ((u64)((0 - delta) & 0xffffffff)) << 32;
}
static const struct pcr_ops n2_pcr_ops = {
.read_pcr = direct_pcr_read,
.write_pcr = n2_pcr_write,
.read_pic = direct_pic_read,
.write_pic = direct_pic_write,
.nmi_picl_value = n2_picl_value,
.pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
PCR_N2_TOE_OV1 |
(2 << PCR_N2_SL1_SHIFT) |
(0xff << PCR_N2_MASK1_SHIFT)),
.pcr_nmi_disable = PCR_PIC_PRIV,
};
static u64 n4_pcr_read(unsigned long reg_num)
{
unsigned long val;
(void) sun4v_vt_get_perfreg(reg_num, &val);
return val;
}
static void n4_pcr_write(unsigned long reg_num, u64 val)
{
(void) sun4v_vt_set_perfreg(reg_num, val);
}
static u64 n4_pic_read(unsigned long reg_num)
{
unsigned long val;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (val)
: "r" (reg_num * 0x8UL), "i" (ASI_PIC));
return val;
}
static void n4_pic_write(unsigned long reg_num, u64 val)
{
__asm__ __volatile__("stxa %0, [%1] %2"
: /* no outputs */
: "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
}
static u64 n4_picl_value(unsigned int nmi_hz)
{
u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);
return ((u64)((0 - delta) & 0xffffffff));
}
static const struct pcr_ops n4_pcr_ops = {
.read_pcr = n4_pcr_read,
.write_pcr = n4_pcr_write,
.read_pic = n4_pic_read,
.write_pic = n4_pic_write,
.nmi_picl_value = n4_picl_value,
.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
PCR_N4_UTRACE | PCR_N4_TOE |
(26 << PCR_N4_SL_SHIFT)),
.pcr_nmi_disable = PCR_N4_PICNPT,
};
static u64 n5_pcr_read(unsigned long reg_num)
{
unsigned long val;
(void) sun4v_t5_get_perfreg(reg_num, &val);
return val;
}
static void n5_pcr_write(unsigned long reg_num, u64 val)
{
(void) sun4v_t5_set_perfreg(reg_num, val);
}
static const struct pcr_ops n5_pcr_ops = {
.read_pcr = n5_pcr_read,
.write_pcr = n5_pcr_write,
.read_pic = n4_pic_read,
.write_pic = n4_pic_write,
.nmi_picl_value = n4_picl_value,
.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
PCR_N4_UTRACE | PCR_N4_TOE |
(26 << PCR_N4_SL_SHIFT)),
.pcr_nmi_disable = PCR_N4_PICNPT,
};
static u64 m7_pcr_read(unsigned long reg_num)
{
unsigned long val;
(void) sun4v_m7_get_perfreg(reg_num, &val);
return val;
}
static void m7_pcr_write(unsigned long reg_num, u64 val)
{
(void) sun4v_m7_set_perfreg(reg_num, val);
}
static const struct pcr_ops m7_pcr_ops = {
.read_pcr = m7_pcr_read,
.write_pcr = m7_pcr_write,
.read_pic = n4_pic_read,
.write_pic = n4_pic_write,
.nmi_picl_value = n4_picl_value,
.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
PCR_N4_UTRACE | PCR_N4_TOE |
(26 << PCR_N4_SL_SHIFT)),
.pcr_nmi_disable = PCR_N4_PICNPT,
};
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
static int __init register_perf_hsvc(void)
{
unsigned long hverror;
if (tlb_type == hypervisor) {
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
perf_hsvc_group = HV_GRP_NIAG_PERF;
break;
case SUN4V_CHIP_NIAGARA2:
perf_hsvc_group = HV_GRP_N2_CPU;
break;
case SUN4V_CHIP_NIAGARA3:
perf_hsvc_group = HV_GRP_KT_CPU;
break;
case SUN4V_CHIP_NIAGARA4:
perf_hsvc_group = HV_GRP_VT_CPU;
break;
case SUN4V_CHIP_NIAGARA5:
perf_hsvc_group = HV_GRP_T5_CPU;
break;
case SUN4V_CHIP_SPARC_M7:
perf_hsvc_group = HV_GRP_M7_PERF;
break;
default:
return -ENODEV;
}
perf_hsvc_major = 1;
perf_hsvc_minor = 0;
hverror = sun4v_hvapi_register(perf_hsvc_group,
perf_hsvc_major,
&perf_hsvc_minor);
if (hverror) {
pr_err("perfmon: Could not register hvapi(0x%lx).\n",
hverror);
return -ENODEV;
}
}
return 0;
}
static void __init unregister_perf_hsvc(void)
{
if (tlb_type != hypervisor)
return;
sun4v_hvapi_unregister(perf_hsvc_group);
}
static int __init setup_sun4v_pcr_ops(void)
{
int ret = 0;
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
case SUN4V_CHIP_NIAGARA2:
case SUN4V_CHIP_NIAGARA3:
pcr_ops = &n2_pcr_ops;
break;
case SUN4V_CHIP_NIAGARA4:
pcr_ops = &n4_pcr_ops;
break;
case SUN4V_CHIP_NIAGARA5:
pcr_ops = &n5_pcr_ops;
break;
case SUN4V_CHIP_SPARC_M7:
pcr_ops = &m7_pcr_ops;
break;
default:
ret = -ENODEV;
break;
}
return ret;
}
int __init pcr_arch_init(void)
{
int err = register_perf_hsvc();
if (err)
return err;
switch (tlb_type) {
case hypervisor:
err = setup_sun4v_pcr_ops();
if (err)
goto out_unregister;
break;
case cheetah:
case cheetah_plus:
pcr_ops = &direct_pcr_ops;
break;
case spitfire:
/* UltraSPARC-I/II and derivatives lack a profile
* counter overflow interrupt so we can't make use of
* their hardware currently.
*/
fallthrough;
default:
err = -ENODEV;
goto out_unregister;
}
return nmi_init();
out_unregister:
unregister_perf_hsvc();
return err;
}
| linux-master | arch/sparc/kernel/pcr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sparc/kernel/traps.c
*
* Copyright 1995, 2008 David S. Miller ([email protected])
* Copyright 2000 Jakub Jelinek ([email protected])
*/
/*
* I hate traps on the sparc, grrr...
*/
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/mm_types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/pgtable.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include "entry.h"
#include "kernel.h"
/* #define TRAP_DEBUG */
static void instruction_dump(unsigned long *pc)
{
int i;
if((((unsigned long) pc) & 3))
return;
for(i = -3; i < 6; i++)
printk("%c%08lx%c",i?' ':'<',pc[i],i?' ':'>');
printk("\n");
}
#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
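/* The eight __SAVE/__RESTORE pairs in die_if_kernel() below walk the
 * register-window stack: each pair forces another window to spill to
 * the stack, so the frame pointers read during the back trace are in
 * memory rather than in live windows.
 */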
void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
int count = 0;
/* Amuse the user. */
printk(
" \\|/ ____ \\|/\n"
" \"@'/ ,. \\`@\"\n"
" /_| \\__/ |_\\\n"
" \\__U_/\n");
printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
show_regs(regs);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
__SAVE; __SAVE; __SAVE; __SAVE;
__SAVE; __SAVE; __SAVE; __SAVE;
__RESTORE; __RESTORE; __RESTORE; __RESTORE;
__RESTORE; __RESTORE; __RESTORE; __RESTORE;
{
struct reg_window32 *rw = (struct reg_window32 *)regs->u_regs[UREG_FP];
/* Stop the back trace when we hit userland or we
* find some badly aligned kernel stack. Set an upper
* bound in case our stack is trashed and we loop.
*/
while(rw &&
count++ < 30 &&
(((unsigned long) rw) >= PAGE_OFFSET) &&
!(((unsigned long) rw) & 0x7)) {
printk("Caller[%08lx]: %pS\n", rw->ins[7],
(void *) rw->ins[7]);
rw = (struct reg_window32 *)rw->ins[6];
}
}
printk("Instruction DUMP:");
instruction_dump ((unsigned long *) regs->pc);
make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
}
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
{
if(type < 0x80) {
/* Sun OS's puke from bad traps, Linux survives! */
printk("Unimplemented Sparc TRAP, type = %02lx\n", type);
die_if_kernel("Whee... Hello Mr. Penguin", regs);
}
if(regs->psr & PSR_PS)
die_if_kernel("Kernel bad trap", regs);
force_sig_fault_trapno(SIGILL, ILL_ILLTRP,
(void __user *)regs->pc, type - 0x80);
}
void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
if(psr & PSR_PS)
die_if_kernel("Kernel illegal instruction", regs);
#ifdef TRAP_DEBUG
printk("Ill instr. at pc=%08lx instruction is %08lx\n",
regs->pc, *(unsigned long *)regs->pc);
#endif
send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, current);
}
void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
if(psr & PSR_PS)
die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, current);
}
/* XXX User may want to be allowed to do this. XXX */
void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
if(regs->psr & PSR_PS) {
printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc,
regs->u_regs[UREG_RETPC]);
die_if_kernel("BOGUS", regs);
/* die_if_kernel("Kernel MNA access", regs); */
}
#if 0
show_regs (regs);
instruction_dump ((unsigned long *) regs->pc);
printk ("do_MNA!\n");
#endif
send_sig_fault(SIGBUS, BUS_ADRALN,
/* FIXME: Should dig out mna address */ (void *)0,
current);
}
static unsigned long init_fsr = 0x0UL;
static unsigned long init_fregs[32] __attribute__ ((aligned (8))) =
{ ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL };
void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
/* Sanity check... */
if(psr & PSR_PS)
die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs);
put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */
regs->psr |= PSR_EF;
#ifndef CONFIG_SMP
if(last_task_used_math == current)
return;
if(last_task_used_math) {
/* Other processes fpu state, save away */
struct task_struct *fptask = last_task_used_math;
fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr,
&fptask->thread.fpqueue[0], &fptask->thread.fpqdepth);
}
last_task_used_math = current;
if(used_math()) {
fpload(&current->thread.float_regs[0], &current->thread.fsr);
} else {
/* Set initial sane state. */
fpload(&init_fregs[0], &init_fsr);
set_used_math();
}
#else
if(!used_math()) {
fpload(&init_fregs[0], &init_fsr);
set_used_math();
} else {
fpload(&current->thread.float_regs[0], &current->thread.fsr);
}
set_thread_flag(TIF_USEDFPU);
#endif
}
static unsigned long fake_regs[32] __attribute__ ((aligned (8)));
static unsigned long fake_fsr;
static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
static unsigned long fake_depth;
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
static int calls;
unsigned long fsr;
int ret = 0;
int code;
#ifndef CONFIG_SMP
struct task_struct *fpt = last_task_used_math;
#else
struct task_struct *fpt = current;
#endif
put_psr(get_psr() | PSR_EF);
/* If nobody owns the fpu right now, just clear the
* error into our fake static buffer and hope it don't
* happen again. Thank you crashme...
*/
#ifndef CONFIG_SMP
if(!fpt) {
#else
if (!test_tsk_thread_flag(fpt, TIF_USEDFPU)) {
#endif
fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
regs->psr &= ~PSR_EF;
return;
}
fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr,
&fpt->thread.fpqueue[0], &fpt->thread.fpqdepth);
#ifdef DEBUG_FPU
printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr);
#endif
switch ((fpt->thread.fsr & 0x1c000)) {
/* switch on the contents of the ftt [floating point trap type] field */
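/* ftt occupies FSR bits [16:14]; 0x1c000 masks those bits, so the
 * (n << 14) case labels below select trap type n */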
#ifdef DEBUG_FPU
case (1 << 14):
printk("IEEE_754_exception\n");
break;
#endif
case (2 << 14): /* unfinished_FPop (underflow & co) */
case (3 << 14): /* unimplemented_FPop (quad stuff, maybe sqrt) */
ret = do_mathemu(regs, fpt);
break;
#ifdef DEBUG_FPU
case (4 << 14):
printk("sequence_error (OS bug...)\n");
break;
case (5 << 14):
printk("hardware_error (uhoh!)\n");
break;
case (6 << 14):
printk("invalid_fp_register (user error)\n");
break;
#endif /* DEBUG_FPU */
}
/* If we successfully emulated the FPop, we pretend the trap never happened :-> */
if (ret) {
fpload(&current->thread.float_regs[0], &current->thread.fsr);
return;
}
/* nope, better SIGFPE the offending process... */
#ifdef CONFIG_SMP
clear_tsk_thread_flag(fpt, TIF_USEDFPU);
#endif
if(psr & PSR_PS) {
/* The first fsr store/load we tried trapped,
* the second one will not (we hope).
*/
printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n",
regs->pc);
regs->pc = regs->npc;
regs->npc += 4;
calls++;
if(calls > 2)
die_if_kernel("Too many Penguin-FPU traps from kernel mode",
regs);
return;
}
fsr = fpt->thread.fsr;
code = FPE_FLTUNK;
if ((fsr & 0x1c000) == (1 << 14)) {
if (fsr & 0x10)
code = FPE_FLTINV;
else if (fsr & 0x08)
code = FPE_FLTOVF;
else if (fsr & 0x04)
code = FPE_FLTUND;
else if (fsr & 0x02)
code = FPE_FLTDIV;
else if (fsr & 0x01)
code = FPE_FLTRES;
}
send_sig_fault(SIGFPE, code, (void __user *)pc, fpt);
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#endif
regs->psr &= ~PSR_EF;
if(calls > 0)
calls=0;
}
void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
if(psr & PSR_PS)
die_if_kernel("Penguin overflow trap from kernel mode", regs);
send_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)pc, current);
}
void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
#ifdef TRAP_DEBUG
printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
#endif
if(psr & PSR_PS)
panic("Tell me what a watchpoint trap is, and I'll then deal "
"with such a beast...");
}
void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
#ifdef TRAP_DEBUG
printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
#endif
force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)pc);
}
void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, current);
}
void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
#ifdef TRAP_DEBUG
printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
#endif
send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, current);
}
void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
send_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)pc, current);
}
#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
// bust_spinlocks(1); XXX Not in our original BUG()
printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif
/* Since we have our mappings set up, on multiprocessors we can spin them
* up here so that timer interrupts work during initialization.
*/
void trap_init(void)
{
extern void thread_info_offsets_are_bolixed_pete(void);
/* Force linker to barf if mismatched */
if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) ||
TI_TASK != offsetof(struct thread_info, task) ||
TI_FLAGS != offsetof(struct thread_info, flags) ||
TI_CPU != offsetof(struct thread_info, cpu) ||
TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
TI_SOFTIRQ != offsetof(struct thread_info, softirq_count) ||
TI_HARDIRQ != offsetof(struct thread_info, hardirq_count) ||
TI_KSP != offsetof(struct thread_info, ksp) ||
TI_KPC != offsetof(struct thread_info, kpc) ||
TI_KPSR != offsetof(struct thread_info, kpsr) ||
TI_KWIM != offsetof(struct thread_info, kwim) ||
TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
TI_W_SAVED != offsetof(struct thread_info, w_saved))
thread_info_offsets_are_bolixed_pete();
/* Attach to the address space of init_task. */
mmgrab(&init_mm);
current->active_mm = &init_mm;
/* NOTE: Other cpus have this done as they are started
* up on SMP.
*/
}
| linux-master | arch/sparc/kernel/traps_32.c |
// SPDX-License-Identifier: GPL-2.0-only
/* adi_64.c: support for ADI (Application Data Integrity) feature on
* sparc m7 and newer processors. This feature is also known as
* SSM (Silicon Secured Memory).
*
* Copyright (C) 2016 Oracle and/or its affiliates. All rights reserved.
* Author: Khalid Aziz ([email protected])
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <asm/mdesc.h>
#include <asm/adi_64.h>
#include <asm/mmu_64.h>
#include <asm/pgtable_64.h>
/* Each page of storage for ADI tags can accommodate tags for 128
* pages. When ADI enabled pages are being swapped out, it would be
* prudent to allocate at least enough tag storage space to accommodate
* SWAPFILE_CLUSTER number of pages. Allocate enough tag storage to
* store tags for four SWAPFILE_CLUSTER pages to reduce need for
* further allocations for same vma.
*/
#define TAG_STORAGE_PAGES 8
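/* Sanity check on the figure above, assuming the usual sparc64 values
 * of PAGE_SIZE = 8K and adi_blksize() = 64 bytes: one data page needs
 * 8192/64 = 128 tags = 64 bytes of tag storage, so a single 8K tag
 * page holds 8192/64 = 128 pages' worth of tags, matching the comment.
 */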
struct adi_config adi_state;
EXPORT_SYMBOL(adi_state);
/* mdesc_adi_init() : Parse machine description provided by the
* hypervisor to detect ADI capabilities
*
* Hypervisor reports ADI capabilities of platform in "hwcap-list" property
* for "cpu" node. If the platform supports ADI, "hwcap-list" property
* contains the keyword "adp". In that case, the "platform" node will
* contain "adp-blksz", "adp-nbits" and "ue-on-adp" properties
* to describe the ADI capabilities.
*/
void __init mdesc_adi_init(void)
{
struct mdesc_handle *hp = mdesc_grab();
const char *prop;
u64 pn, *val;
int len;
if (!hp)
goto adi_not_found;
pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
if (pn == MDESC_NODE_NULL)
goto adi_not_found;
prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
if (!prop)
goto adi_not_found;
/*
* Look for "adp" keyword in hwcap-list which would indicate
* ADI support
*/
adi_state.enabled = false;
while (len) {
int plen;
if (!strcmp(prop, "adp")) {
adi_state.enabled = true;
break;
}
plen = strlen(prop) + 1;
prop += plen;
len -= plen;
}
if (!adi_state.enabled)
goto adi_not_found;
/* Find the ADI properties in "platform" node. If all ADI
* properties are not found, ADI support is incomplete and
* do not enable ADI in the kernel.
*/
pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
if (pn == MDESC_NODE_NULL)
goto adi_not_found;
val = (u64 *) mdesc_get_property(hp, pn, "adp-blksz", &len);
if (!val)
goto adi_not_found;
adi_state.caps.blksz = *val;
val = (u64 *) mdesc_get_property(hp, pn, "adp-nbits", &len);
if (!val)
goto adi_not_found;
adi_state.caps.nbits = *val;
val = (u64 *) mdesc_get_property(hp, pn, "ue-on-adp", &len);
if (!val)
goto adi_not_found;
adi_state.caps.ue_on_adi = *val;
/* Some of the code to support swapping ADI tags is written
* with the assumption that two ADI tags can fit inside one byte. If
* this assumption is broken by a future architecture change,
* that code will have to be revisited. If that were to happen,
* disable ADI support so we do not get unpredictable results
* with programs trying to use ADI and their pages getting
* swapped out
*/
if (adi_state.caps.nbits > 4) {
pr_warn("WARNING: ADI tag size >4 on this platform. Disabling AADI support\n");
adi_state.enabled = false;
}
mdesc_release(hp);
return;
adi_not_found:
adi_state.enabled = false;
adi_state.caps.blksz = 0;
adi_state.caps.nbits = 0;
if (hp)
mdesc_release(hp);
}
tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr)
{
tag_storage_desc_t *tag_desc = NULL;
unsigned long i, max_desc, flags;
/* Check if this vma already has tag storage descriptor
* allocated for it.
*/
max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
if (mm->context.tag_store) {
tag_desc = mm->context.tag_store;
spin_lock_irqsave(&mm->context.tag_lock, flags);
for (i = 0; i < max_desc; i++) {
if ((addr >= tag_desc->start) &&
((addr + PAGE_SIZE - 1) <= tag_desc->end))
break;
tag_desc++;
}
spin_unlock_irqrestore(&mm->context.tag_lock, flags);
/* If no matching entries were found, this must be a
* freshly allocated page
*/
if (i >= max_desc)
tag_desc = NULL;
}
return tag_desc;
}
tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr)
{
unsigned char *tags;
unsigned long i, size, max_desc, flags;
tag_storage_desc_t *tag_desc, *open_desc;
unsigned long end_addr, hole_start, hole_end;
max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
open_desc = NULL;
hole_start = 0;
hole_end = ULONG_MAX;
end_addr = addr + PAGE_SIZE - 1;
/* Check if this vma already has tag storage descriptor
* allocated for it.
*/
spin_lock_irqsave(&mm->context.tag_lock, flags);
if (mm->context.tag_store) {
tag_desc = mm->context.tag_store;
/* Look for a matching entry for this address. While doing
* that, look for the first open slot as well and find
* the hole in already allocated range where this request
* will fit in.
*/
for (i = 0; i < max_desc; i++) {
if (tag_desc->tag_users == 0) {
if (open_desc == NULL)
open_desc = tag_desc;
} else {
if ((addr >= tag_desc->start) &&
(tag_desc->end >= (addr + PAGE_SIZE - 1))) {
tag_desc->tag_users++;
goto out;
}
}
if ((tag_desc->start > end_addr) &&
(tag_desc->start < hole_end))
hole_end = tag_desc->start;
if ((tag_desc->end < addr) &&
(tag_desc->end > hole_start))
hole_start = tag_desc->end;
tag_desc++;
}
} else {
size = sizeof(tag_storage_desc_t)*max_desc;
mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
if (mm->context.tag_store == NULL) {
tag_desc = NULL;
goto out;
}
tag_desc = mm->context.tag_store;
for (i = 0; i < max_desc; i++, tag_desc++)
tag_desc->tag_users = 0;
open_desc = mm->context.tag_store;
i = 0;
}
/* Check if we ran out of tag storage descriptors */
if (open_desc == NULL) {
tag_desc = NULL;
goto out;
}
/* Mark this tag descriptor slot in use and then initialize it */
tag_desc = open_desc;
tag_desc->tag_users = 1;
/* Tag storage has not been allocated for this vma and space
* is available in tag storage descriptor. Since this page is
* being swapped out, there is high probability subsequent pages
* in the VMA will be swapped out as well. Allocate pages to
* store tags for as many pages in this vma as possible but not
* more than TAG_STORAGE_PAGES. Each byte in tag space holds
* two ADI tags since each ADI tag is 4 bits. Each ADI tag
* covers adi_blksize() worth of addresses. Check if the hole is
* big enough to accommodate full address range for using
* TAG_STORAGE_PAGES number of tag pages.
*/
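	/* For illustration, with the assumed values above (8K pages,
	 * 64-byte ADI blocks): size = 8 * 8192 = 64K bytes of tag
	 * storage, and size * 2 * adi_blksize() = 65536 * 128 = 8M, so
	 * one full allocation covers an 8 MB stretch of address space.
	 */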
size = TAG_STORAGE_PAGES * PAGE_SIZE;
end_addr = addr + (size*2*adi_blksize()) - 1;
/* Check for overflow. If overflow occurs, allocate only one page */
if (end_addr < addr) {
size = PAGE_SIZE;
end_addr = addr + (size*2*adi_blksize()) - 1;
/* If overflow happens with the minimum tag storage
* allocation as well, adjust ending address for this
* tag storage.
*/
if (end_addr < addr)
end_addr = ULONG_MAX;
}
if (hole_end < end_addr) {
/* Available hole is too small on the upper end of
* address. Can we expand the range towards the lower
* address and maximize use of this slot?
*/
unsigned long tmp_addr;
end_addr = hole_end - 1;
tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
/* Check for underflow. If underflow occurs, allocate
* only one page for storing ADI tags
*/
if (tmp_addr > addr) {
size = PAGE_SIZE;
tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
/* If underflow happens with the minimum tag storage
* allocation as well, adjust starting address for
* this tag storage.
*/
if (tmp_addr > addr)
tmp_addr = 0;
}
if (tmp_addr < hole_start) {
/* Available hole is restricted on lower address
* end as well
*/
tmp_addr = hole_start + 1;
}
addr = tmp_addr;
size = (end_addr + 1 - addr)/(2*adi_blksize());
size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
size = size * PAGE_SIZE;
}
tags = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
if (tags == NULL) {
tag_desc->tag_users = 0;
tag_desc = NULL;
goto out;
}
tag_desc->start = addr;
tag_desc->tags = tags;
tag_desc->end = end_addr;
out:
spin_unlock_irqrestore(&mm->context.tag_lock, flags);
return tag_desc;
}
void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
{
unsigned long flags;
unsigned char *tags = NULL;
spin_lock_irqsave(&mm->context.tag_lock, flags);
tag_desc->tag_users--;
if (tag_desc->tag_users == 0) {
tag_desc->start = tag_desc->end = 0;
/* Do not free up the tag storage space allocated
* by the first descriptor. This is persistent
* emergency tag storage space for the task.
*/
if (tag_desc != mm->context.tag_store) {
tags = tag_desc->tags;
tag_desc->tags = NULL;
}
}
spin_unlock_irqrestore(&mm->context.tag_lock, flags);
kfree(tags);
}
#define tag_start(addr, tag_desc) \
((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))
/* Retrieve any saved ADI tags for the page being swapped back in and
* restore these tags to the newly allocated physical page.
*/
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t pte)
{
unsigned char *tag;
tag_storage_desc_t *tag_desc;
unsigned long paddr, tmp, version1, version2;
/* Check if the swapped out page has an ADI version
* saved. If yes, restore version tag to the newly
* allocated page.
*/
tag_desc = find_tag_store(mm, vma, addr);
if (tag_desc == NULL)
return;
tag = tag_start(addr, tag_desc);
paddr = pte_val(pte) & _PAGE_PADDR_4V;
for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
version1 = (*tag) >> 4;
version2 = (*tag) & 0x0f;
*tag++ = 0;
asm volatile("stxa %0, [%1] %2\n\t"
:
: "r" (version1), "r" (tmp),
"i" (ASI_MCD_REAL));
tmp += adi_blksize();
asm volatile("stxa %0, [%1] %2\n\t"
:
: "r" (version2), "r" (tmp),
"i" (ASI_MCD_REAL));
}
asm volatile("membar #Sync\n\t");
/* Check and mark this tag space for release later if
* the swapped in page was the last user of tag space
*/
del_tag_store(tag_desc, mm);
}
/* A page is about to be swapped out. Save any ADI tags associated with
* this physical page so they can be restored later when the page is swapped
* back in.
*/
int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t oldpte)
{
unsigned char *tag;
tag_storage_desc_t *tag_desc;
unsigned long version1, version2, paddr, tmp;
tag_desc = alloc_tag_store(mm, vma, addr);
if (tag_desc == NULL)
return -1;
tag = tag_start(addr, tag_desc);
paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
asm volatile("ldxa [%1] %2, %0\n\t"
: "=r" (version1)
: "r" (tmp), "i" (ASI_MCD_REAL));
tmp += adi_blksize();
asm volatile("ldxa [%1] %2, %0\n\t"
: "=r" (version2)
: "r" (tmp), "i" (ASI_MCD_REAL));
*tag = (version1 << 4) | version2;
tag++;
}
return 0;
}
| linux-master | arch/sparc/kernel/adi_64.c |
// SPDX-License-Identifier: GPL-2.0
/* sysfs.c: Topology sysfs support code for sparc64.
*
* Copyright (C) 2007 David S. Miller <[email protected]>
*/
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
return sprintf(buf, "%lu\n", p->NAME); \
} \
static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL)
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
static struct attribute *mmu_stat_attrs[] = {
&dev_attr_immu_tsb_hits_ctx0_8k_tte.attr,
&dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr,
&dev_attr_immu_tsb_hits_ctx0_64k_tte.attr,
&dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr,
&dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr,
&dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
&dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr,
&dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
&dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
&dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
&dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
&dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
&dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
&dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
&dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
&dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
&dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
&dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
&dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
&dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
&dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
&dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
&dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
&dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
&dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
NULL,
};
static struct attribute_group mmu_stat_group = {
.attrs = mmu_stat_attrs,
.name = "mmu_stats",
};
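/* Once registered on a cpu device, these attributes appear in sysfs,
 * e.g. (path assumed from the standard cpu device layout):
 *
 *	/sys/devices/system/cpu/cpu0/mmu_stats/dmmu_tsb_hits_ctx0_8k_tte
 *
 * with collection toggled via the mmustat_enable attribute below.
 */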
static long read_mmustat_enable(void *data __maybe_unused)
{
unsigned long ra = 0;
sun4v_mmustat_info(&ra);
return ra != 0;
}
static long write_mmustat_enable(void *data)
{
unsigned long ra, orig_ra, *val = data;
if (*val)
ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
else
ra = 0UL;
return sun4v_mmustat_conf(ra, &orig_ra);
}
static ssize_t show_mmustat_enable(struct device *s,
struct device_attribute *attr, char *buf)
{
long val = work_on_cpu(s->id, read_mmustat_enable, NULL);
return sprintf(buf, "%lx\n", val);
}
static ssize_t store_mmustat_enable(struct device *s,
struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
long err;
int ret;
ret = sscanf(buf, "%lu", &val);
if (ret != 1)
return -EINVAL;
err = work_on_cpu(s->id, write_mmustat_enable, &val);
if (err)
return -EIO;
return count;
}
static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
static int mmu_stats_supported;
static int register_mmu_stats(struct device *s)
{
if (!mmu_stats_supported)
return 0;
device_create_file(s, &dev_attr_mmustat_enable);
return sysfs_create_group(&s->kobj, &mmu_stat_group);
}
#ifdef CONFIG_HOTPLUG_CPU
static void unregister_mmu_stats(struct device *s)
{
if (!mmu_stats_supported)
return;
sysfs_remove_group(&s->kobj, &mmu_stat_group);
device_remove_file(s, &dev_attr_mmustat_enable);
}
#endif
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
cpuinfo_sparc *c = &cpu_data(dev->id); \
return sprintf(buf, "%lu\n", c->MEMBER); \
}
#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
cpuinfo_sparc *c = &cpu_data(dev->id); \
return sprintf(buf, "%u\n", c->MEMBER); \
}
SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
static struct device_attribute cpu_core_attrs[] = {
__ATTR(clock_tick, 0444, show_clock_tick, NULL),
__ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
__ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
__ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
__ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
__ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
__ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
};
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
int i;
for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
device_create_file(s, &cpu_core_attrs[i]);
register_mmu_stats(s);
return 0;
}
static int unregister_cpu_online(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
int i;
unregister_mmu_stats(s);
for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
device_remove_file(s, &cpu_core_attrs[i]);
#endif
return 0;
}
static void __init check_mmu_stats(void)
{
unsigned long dummy1, err;
if (tlb_type != hypervisor)
return;
err = sun4v_mmustat_info(&dummy1);
if (!err)
mmu_stats_supported = 1;
}
static int __init topology_init(void)
{
int cpu, ret;
check_mmu_stats();
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
register_cpu(c, cpu);
}
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "sparc/topology:online",
register_cpu_online, unregister_cpu_online);
WARN_ON(ret < 0);
return 0;
}
subsys_initcall(topology_init);
| linux-master | arch/sparc/kernel/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/spitfire.h>
#include "of_device_common.h"
void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
{
unsigned long ret = res->start + offset;
struct resource *r;
if (res->flags & IORESOURCE_MEM)
r = request_mem_region(ret, size, name);
else
r = request_region(ret, size, name);
if (!r)
ret = 0;
return (void __iomem *) ret;
}
EXPORT_SYMBOL(of_ioremap);
void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
if (res->flags & IORESOURCE_MEM)
release_mem_region((unsigned long) base, size);
else
release_region((unsigned long) base, size);
}
EXPORT_SYMBOL(of_iounmap);
/*
* PCI bus specific translator
*/
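/* A reminder of the OF PCI address encoding used below: the high cell
 * (addr[0]) carries the space code in bits [25:24] (0x0 config,
 * 0x1 I/O, 0x2 32-bit memory, 0x3 64-bit memory) and the prefetchable
 * bit at 0x40000000, which is what of_bus_pci_get_flags() decodes.
 */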
static int of_bus_pci_match(struct device_node *np)
{
if (of_node_name_eq(np, "pci")) {
const char *model = of_get_property(np, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
return 0;
/* Do not do PCI specific frobbing if the
* PCI bridge lacks a ranges property. We
* want to pass it through up to the next
* parent as-is, not with the PCI translate
* method which chops off the top address cell.
*/
if (!of_property_present(np, "ranges"))
return 0;
return 1;
}
return 0;
}
static int of_bus_simba_match(struct device_node *np)
{
const char *model = of_get_property(np, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
return 1;
/* Treat PCI busses lacking ranges property just like
* simba.
*/
if (of_node_name_eq(np, "pci")) {
if (!of_property_present(np, "ranges"))
return 1;
}
return 0;
}
static int of_bus_simba_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
return 0;
}
static void of_bus_pci_count_cells(struct device_node *np,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 3;
if (sizec)
*sizec = 2;
}
static int of_bus_pci_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
u32 result[OF_MAX_ADDR_CELLS];
int i;
/* Check address type match */
if (!((addr[0] ^ range[0]) & 0x03000000))
goto type_match;
/* Special exception, we can map a 64-bit address into
* a 32-bit range.
*/
if ((addr[0] & 0x03000000) == 0x03000000 &&
(range[0] & 0x03000000) == 0x02000000)
goto type_match;
return -EINVAL;
type_match:
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
/* Start with the parent range base. */
memcpy(result, range + na, pna * 4);
/* Add in the child address offset, skipping high cell. */
for (i = 0; i < na - 1; i++)
result[pna - 1 - i] +=
(addr[na - 1 - i] -
range[na - 1 - i]);
memcpy(addr, result, pna * 4);
return 0;
}
static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
{
u32 w = addr[0];
/* For PCI, we override whatever child busses may have used. */
flags = 0;
switch((w >> 24) & 0x03) {
case 0x01:
flags |= IORESOURCE_IO;
break;
case 0x02: /* 32 bits */
case 0x03: /* 64 bits */
flags |= IORESOURCE_MEM;
break;
}
if (w & 0x40000000)
flags |= IORESOURCE_PREFETCH;
return flags;
}
/*
* FHC/Central bus specific translator.
*
* This is just needed to hard-code the address and size cell
* counts. 'fhc' and 'central' nodes lack the #address-cells and
* #size-cells properties, and if you walk to the root on such
* Enterprise boxes all you'll get is a #size-cells of 2 which is
* not what we want to use.
*/
static int of_bus_fhc_match(struct device_node *np)
{
return of_node_name_eq(np, "fhc") ||
of_node_name_eq(np, "central");
}
#define of_bus_fhc_count_cells of_bus_sbus_count_cells
/*
* Array of bus specific translators
*/
static struct of_bus of_busses[] = {
/* PCI */
{
.name = "pci",
.addr_prop_name = "assigned-addresses",
.match = of_bus_pci_match,
.count_cells = of_bus_pci_count_cells,
.map = of_bus_pci_map,
.get_flags = of_bus_pci_get_flags,
},
/* SIMBA */
{
.name = "simba",
.addr_prop_name = "assigned-addresses",
.match = of_bus_simba_match,
.count_cells = of_bus_pci_count_cells,
.map = of_bus_simba_map,
.get_flags = of_bus_pci_get_flags,
},
/* SBUS */
{
.name = "sbus",
.addr_prop_name = "reg",
.match = of_bus_sbus_match,
.count_cells = of_bus_sbus_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
/* FHC */
{
.name = "fhc",
.addr_prop_name = "reg",
.match = of_bus_fhc_match,
.count_cells = of_bus_fhc_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
/* Default */
{
.name = "default",
.addr_prop_name = "reg",
.match = NULL,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
};
static struct of_bus *of_match_bus(struct device_node *np)
{
int i;
for (i = 0; i < ARRAY_SIZE(of_busses); i ++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
BUG();
return NULL;
}
static int __init build_one_resource(struct device_node *parent,
struct of_bus *bus,
struct of_bus *pbus,
u32 *addr,
int na, int ns, int pna)
{
const u32 *ranges;
int rone, rlen;
ranges = of_get_property(parent, "ranges", &rlen);
if (ranges == NULL || rlen == 0) {
u32 result[OF_MAX_ADDR_CELLS];
int i;
memset(result, 0, pna * 4);
for (i = 0; i < na; i++)
result[pna - 1 - i] =
addr[na - 1 - i];
memcpy(addr, result, pna * 4);
return 0;
}
/* Now walk through the ranges */
rlen /= 4;
rone = na + pna + ns;
for (; rlen >= rone; rlen -= rone, ranges += rone) {
if (!bus->map(addr, ranges, na, ns, pna))
return 0;
}
/* When we miss an I/O space match on PCI, just pass it up
* to the next PCI bridge and/or controller.
*/
if (!strcmp(bus->name, "pci") &&
(addr[0] & 0x03000000) == 0x01000000)
return 0;
return 1;
}
static int __init use_1to1_mapping(struct device_node *pp)
{
/* If we have a ranges property in the parent, use it. */
if (of_property_present(pp, "ranges"))
return 0;
/* If the parent is the dma node of an ISA bus, pass
* the translation up to the root.
*
* Some SBUS devices use intermediate nodes to express
* hierarchy within the device itself. These aren't
* real bus nodes, and don't have a 'ranges' property.
* But, we should still pass the translation work up
* to the SBUS itself.
*/
if (of_node_name_eq(pp, "dma") ||
of_node_name_eq(pp, "espdma") ||
of_node_name_eq(pp, "ledma") ||
of_node_name_eq(pp, "lebuffer"))
return 0;
/* Similarly for all PCI bridges, if we get this far
* it lacks a ranges property, and this will include
* cases like Simba.
*/
if (of_node_name_eq(pp, "pci"))
return 0;
return 1;
}
static int of_resource_verbose;
static void __init build_device_resources(struct platform_device *op,
struct device *parent)
{
struct platform_device *p_op;
struct of_bus *bus;
int na, ns;
int index, num_reg;
const void *preg;
if (!parent)
return;
p_op = to_platform_device(parent);
bus = of_match_bus(p_op->dev.of_node);
bus->count_cells(op->dev.of_node, &na, &ns);
preg = of_get_property(op->dev.of_node, bus->addr_prop_name, &num_reg);
if (!preg || num_reg == 0)
return;
/* Convert to num-cells. */
num_reg /= 4;
/* Convert to num-entries. */
num_reg /= na + ns;
/* Prevent overrunning the op->resources[] array. */
if (num_reg > PROMREG_MAX) {
printk(KERN_WARNING "%pOF: Too many regs (%d), "
"limiting to %d.\n",
op->dev.of_node, num_reg, PROMREG_MAX);
num_reg = PROMREG_MAX;
}
op->resource = op->archdata.resource;
op->num_resources = num_reg;
for (index = 0; index < num_reg; index++) {
struct resource *r = &op->resource[index];
u32 addr[OF_MAX_ADDR_CELLS];
const u32 *reg = (preg + (index * ((na + ns) * 4)));
struct device_node *dp = op->dev.of_node;
struct device_node *pp = p_op->dev.of_node;
struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
int dna, dns;
int pna, pns;
size = of_read_addr(reg + na, ns);
memcpy(addr, reg, na * 4);
flags = bus->get_flags(addr, 0);
if (use_1to1_mapping(pp)) {
result = of_read_addr(addr, na);
goto build_res;
}
dna = na;
dns = ns;
dbus = bus;
while (1) {
dp = pp;
pp = dp->parent;
if (!pp) {
result = of_read_addr(addr, dna);
break;
}
pbus = of_match_bus(pp);
pbus->count_cells(dp, &pna, &pns);
if (build_one_resource(dp, dbus, pbus, addr,
dna, dns, pna))
break;
flags = pbus->get_flags(addr, flags);
dna = pna;
dns = pns;
dbus = pbus;
}
build_res:
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
printk("%pOF reg[%d] -> %llx\n",
op->dev.of_node, index,
result);
if (result != OF_BAD_ADDR) {
if (tlb_type == hypervisor)
result &= 0x0fffffffffffffffUL;
r->start = result;
r->end = result + size - 1;
r->flags = flags;
}
r->name = op->dev.of_node->full_name;
}
}
static struct device_node * __init
apply_interrupt_map(struct device_node *dp, struct device_node *pp,
const u32 *imap, int imlen, const u32 *imask,
unsigned int *irq_p)
{
struct device_node *cp;
unsigned int irq = *irq_p;
struct of_bus *bus;
phandle handle;
const u32 *reg;
int na, num_reg, i;
bus = of_match_bus(pp);
bus->count_cells(dp, &na, NULL);
reg = of_get_property(dp, "reg", &num_reg);
if (!reg || !num_reg)
return NULL;
imlen /= ((na + 3) * 4);
handle = 0;
for (i = 0; i < imlen; i++) {
int j;
for (j = 0; j < na; j++) {
if ((reg[j] & imask[j]) != imap[j])
goto next;
}
if (imap[na] == irq) {
handle = imap[na + 1];
irq = imap[na + 2];
break;
}
next:
imap += (na + 3);
}
if (i == imlen) {
/* Psycho and Sabre PCI controllers can have 'interrupt-map'
* properties that do not include the on-board device
* interrupts. Instead, the device's 'interrupts' property
* is already a fully specified INO value.
*
* Handle this by deciding that, if we didn't get a
* match in the parent's 'interrupt-map', and the
* parent is an IRQ translator, then use the parent as
* our IRQ controller.
*/
if (pp->irq_trans)
return pp;
return NULL;
}
*irq_p = irq;
cp = of_find_node_by_phandle(handle);
return cp;
}
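/* For reference, each interrupt-map entry scanned above is laid out as
 * (na child unit-address cells, 1 child interrupt cell, 1 parent
 * phandle cell, 1 parent interrupt cell) -- hence the (na + 3) stride
 * -- and interrupt-map-mask supplies the imask[] values applied to the
 * child's "reg" before comparing.
 */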
static unsigned int __init pci_irq_swizzle(struct device_node *dp,
struct device_node *pp,
unsigned int irq)
{
const struct linux_prom_pci_registers *regs;
unsigned int bus, devfn, slot, ret;
if (irq < 1 || irq > 4)
return irq;
regs = of_get_property(dp, "reg", NULL);
if (!regs)
return irq;
bus = (regs->phys_hi >> 16) & 0xff;
devfn = (regs->phys_hi >> 8) & 0xff;
slot = (devfn >> 3) & 0x1f;
if (pp->irq_trans) {
/* Derived from Table 8-3, U2P User's Manual. This branch
* is handling a PCI controller that lacks a proper set of
* interrupt-map and interrupt-map-mask properties. The
* Ultra-E450 is one example.
*
* The bit layout is BSSLL, where:
* B: 0 on bus A, 1 on bus B
		 * S: 2-bit slot number, derived from PCI device number as
* (dev - 1) for bus A, or (dev - 2) for bus B
* L: 2-bit line number
*/
if (bus & 0x80) {
/* PBM-A */
bus = 0x00;
slot = (slot - 1) << 2;
} else {
/* PBM-B */
bus = 0x10;
slot = (slot - 2) << 2;
}
irq -= 1;
ret = (bus | slot | irq);
} else {
/* Going through a PCI-PCI bridge that lacks a set of
* interrupt-map and interrupt-map-mask properties.
*/
ret = ((irq - 1 + (slot & 3)) & 3) + 1;
}
return ret;
}
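/* Illustrative sketch, not part of the original file: the BSSLL
 * encoding computed in the pp->irq_trans branch above, assuming the
 * Table 8-3 layout quoted in the comment.  Names are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static unsigned int example_bsll_encode(int on_bus_b, int slot, int line)
{
	unsigned int bus = on_bus_b ? 0x10 : 0x00;
	unsigned int s = (slot - (on_bus_b ? 2 : 1)) << 2;

	return bus | s | (line - 1);
	/* e.g. bus A, device 2, line 1 (INTA#) --> 0x04 */
}
#endif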
static int of_irq_verbose;
static unsigned int __init build_one_device_irq(struct platform_device *op,
struct device *parent,
unsigned int irq)
{
struct device_node *dp = op->dev.of_node;
struct device_node *pp, *ip;
unsigned int orig_irq = irq;
int nid;
if (irq == 0xffffffff)
return irq;
if (dp->irq_trans) {
irq = dp->irq_trans->irq_build(dp, irq,
dp->irq_trans->data);
if (of_irq_verbose)
printk("%pOF: direct translate %x --> %x\n",
dp, orig_irq, irq);
goto out;
}
/* Something more complicated. Walk up to the root, applying
* interrupt-map or bus specific translations, until we hit
* an IRQ translator.
*
* If we hit a bus type or situation we cannot handle, we
* stop and assume that the original IRQ number was in a
	 * format which has special meaning to its immediate parent.
*/
pp = dp->parent;
ip = NULL;
while (pp) {
const void *imap, *imsk;
int imlen;
imap = of_get_property(pp, "interrupt-map", &imlen);
imsk = of_get_property(pp, "interrupt-map-mask", NULL);
if (imap && imsk) {
struct device_node *iret;
int this_orig_irq = irq;
iret = apply_interrupt_map(dp, pp,
imap, imlen, imsk,
&irq);
if (of_irq_verbose)
printk("%pOF: Apply [%pOF:%x] imap --> [%pOF:%x]\n",
op->dev.of_node,
pp, this_orig_irq, iret, irq);
if (!iret)
break;
if (iret->irq_trans) {
ip = iret;
break;
}
} else {
if (of_node_name_eq(pp, "pci")) {
unsigned int this_orig_irq = irq;
irq = pci_irq_swizzle(dp, pp, irq);
if (of_irq_verbose)
printk("%pOF: PCI swizzle [%pOF] "
"%x --> %x\n",
op->dev.of_node,
pp, this_orig_irq,
irq);
}
if (pp->irq_trans) {
ip = pp;
break;
}
}
dp = pp;
pp = pp->parent;
}
if (!ip)
return orig_irq;
irq = ip->irq_trans->irq_build(op->dev.of_node, irq,
ip->irq_trans->data);
if (of_irq_verbose)
printk("%pOF: Apply IRQ trans [%pOF] %x --> %x\n",
op->dev.of_node, ip, orig_irq, irq);
out:
nid = of_node_to_nid(dp);
if (nid != -1) {
cpumask_t numa_mask;
cpumask_copy(&numa_mask, cpumask_of_node(nid));
irq_set_affinity(irq, &numa_mask);
}
return irq;
}
static struct platform_device * __init scan_one_device(struct device_node *dp,
struct device *parent)
{
struct platform_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
const unsigned int *irq;
struct dev_archdata *sd;
int len, i;
if (!op)
return NULL;
sd = &op->dev.archdata;
sd->op = op;
op->dev.of_node = dp;
irq = of_get_property(dp, "interrupts", &len);
if (irq) {
op->archdata.num_irqs = len / 4;
/* Prevent overrunning the op->irqs[] array. */
if (op->archdata.num_irqs > PROMINTR_MAX) {
printk(KERN_WARNING "%pOF: Too many irqs (%d), "
"limiting to %d.\n",
dp, op->archdata.num_irqs, PROMINTR_MAX);
op->archdata.num_irqs = PROMINTR_MAX;
}
memcpy(op->archdata.irqs, irq, op->archdata.num_irqs * 4);
} else {
op->archdata.num_irqs = 0;
}
build_device_resources(op, parent);
for (i = 0; i < op->archdata.num_irqs; i++)
op->archdata.irqs[i] = build_one_device_irq(op, parent, op->archdata.irqs[i]);
op->dev.parent = parent;
op->dev.bus = &platform_bus_type;
if (!parent)
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->phandle);
op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
op->dev.dma_mask = &op->dev.coherent_dma_mask;
if (of_device_register(op)) {
printk("%pOF: Could not register of device.\n", dp);
kfree(op);
op = NULL;
}
return op;
}
static void __init scan_tree(struct device_node *dp, struct device *parent)
{
while (dp) {
struct platform_device *op = scan_one_device(dp, parent);
if (op)
scan_tree(dp->child, &op->dev);
dp = dp->sibling;
}
}
static int __init scan_of_devices(void)
{
struct device_node *root = of_find_node_by_path("/");
struct platform_device *parent;
parent = scan_one_device(root, NULL);
if (!parent)
return 0;
scan_tree(root->child, &parent->dev);
return 0;
}
postcore_initcall(scan_of_devices);
static int __init of_debug(char *str)
{
int val = 0;
get_option(&str, &val);
if (val & 1)
of_resource_verbose = 1;
if (val & 2)
of_irq_verbose = 1;
return 1;
}
__setup("of_debug=", of_debug);
| linux-master | arch/sparc/kernel/of_device_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller ([email protected])
* Copyright (C) 2000 Anton Blanchard ([email protected])
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/spinlock.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/start_kernel.h>
#include <uapi/linux/mount.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/traps.h>
#include <asm/vaddrs.h>
#include <asm/mbus.h>
#include <asm/idprom.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include "kernel.h"
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
0, /* unused */
0, /* orig-video-page */
0, /* orig-video-mode */
128, /* orig-video-cols */
0,0,0, /* ega_ax, ega_bx, ega_cx */
54, /* orig-video-lines */
0, /* orig-video-isVGA */
16 /* orig-video-points */
};
/* Typing sync at the prom prompt calls the function pointed to by
* romvec->pv_synchook which I set to the following function.
 * This should sync all filesystems and return; for now it just
 * prints out pretty messages and returns.
*/
/* Pretty sick eh? */
static void prom_sync_me(void)
{
unsigned long prom_tbr, flags;
/* XXX Badly broken. FIX! - Anton */
local_irq_save(flags);
__asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t" : : "r" (&trapbase));
prom_printf("PROM SYNC COMMAND...\n");
show_mem();
if (!is_idle_task(current)) {
local_irq_enable();
ksys_sync();
local_irq_disable();
}
prom_printf("Returning to prom\n");
__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t" : : "r" (prom_tbr));
local_irq_restore(flags);
}
static unsigned int boot_flags __initdata = 0;
#define BOOTME_DEBUG 0x1
/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size __initdata = 0;
/* which CPU booted us (0xff = not set) */
unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
prom_write(s, n);
}
static struct console prom_early_console = {
.name = "earlyprom",
.write = prom_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
static void __init process_switch(char c)
{
switch (c) {
case 'd':
boot_flags |= BOOTME_DEBUG;
break;
case 's':
break;
case 'h':
prom_printf("boot_flags_init: Halt!\n");
prom_halt();
break;
case 'p':
prom_early_console.flags &= ~CON_BOOT;
break;
default:
printk("Unknown boot switch (-%c)\n", c);
break;
}
}
static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
if (*commands == '\0')
break;
if (*commands == '-') {
commands++;
while (*commands && *commands != ' ')
process_switch(*commands++);
continue;
}
if (!strncmp(commands, "mem=", 4)) {
/*
* "mem=XXX[kKmM] overrides the PROM-reported
* memory size.
*/
cmdline_memory_size = simple_strtoul(commands + 4,
&commands, 0);
if (*commands == 'K' || *commands == 'k') {
cmdline_memory_size <<= 10;
commands++;
} else if (*commands=='M' || *commands=='m') {
cmdline_memory_size <<= 20;
commands++;
}
}
while (*commands && *commands != ' ')
commands++;
}
}
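/* Illustrative sketch, not part of the original file: the mem= suffix
 * handling above in isolation.  Names are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static unsigned long example_parse_mem(const char *arg)
{
	char *end;
	unsigned long size = simple_strtoul(arg, &end, 0);

	if (*end == 'K' || *end == 'k')
		size <<= 10;		/* "mem=512k" --> 524288   */
	else if (*end == 'M' || *end == 'm')
		size <<= 20;		/* "mem=64M"  --> 67108864 */
	return size;
}
#endif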
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
struct cpuid_patch_entry {
unsigned int addr;
unsigned int sun4d[3];
unsigned int leon[3];
};
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
static void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
if (sparc_cpu_model == sun4m) {
/* Nothing to do, this is what the unpatched code
* targets.
*/
return;
}
p = &__cpuid_patch;
while (p < &__cpuid_patch_end) {
unsigned long addr = p->addr;
unsigned int *insns;
switch (sparc_cpu_model) {
case sun4d:
insns = &p->sun4d[0];
break;
case sparc_leon:
insns = &p->leon[0];
break;
default:
prom_printf("Unknown cpu type, halting.\n");
prom_halt();
}
*(unsigned int *) (addr + 0) = insns[0];
flushi(addr + 0);
*(unsigned int *) (addr + 4) = insns[1];
flushi(addr + 4);
*(unsigned int *) (addr + 8) = insns[2];
flushi(addr + 8);
p++;
}
}
struct leon_1insn_patch_entry {
unsigned int addr;
unsigned int insn;
};
enum sparc_cpu sparc_cpu_model;
EXPORT_SYMBOL(sparc_cpu_model);
static __init void leon_patch(void)
{
struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;
/* Default instruction is leon - no patching */
if (sparc_cpu_model == sparc_leon)
return;
while (start < end) {
unsigned long addr = start->addr;
*(unsigned int *)(addr) = start->insn;
flushi(addr);
start++;
}
}
struct tt_entry *sparc_ttable;
/* Called from head_32.S - before we have setup anything
* in the kernel. Be very careful with what you do here.
*/
void __init sparc32_start_kernel(struct linux_romvec *rp)
{
prom_init(rp);
/* Set sparc_cpu_model */
sparc_cpu_model = sun_unknown;
if (!strcmp(&cputypval[0], "sun4m"))
sparc_cpu_model = sun4m;
if (!strcmp(&cputypval[0], "sun4s"))
sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
if (!strcmp(&cputypval[0], "sun4d"))
sparc_cpu_model = sun4d;
if (!strcmp(&cputypval[0], "sun4e"))
sparc_cpu_model = sun4e;
if (!strcmp(&cputypval[0], "sun4u"))
sparc_cpu_model = sun4u;
if (!strncmp(&cputypval[0], "leon" , 4))
sparc_cpu_model = sparc_leon;
leon_patch();
start_kernel();
}
void __init setup_arch(char **cmdline_p)
{
int i;
unsigned long highest_paddr;
sparc_ttable = &trapbase;
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
parse_early_param();
boot_flags_init(*cmdline_p);
register_console(&prom_early_console);
switch(sparc_cpu_model) {
case sun4m:
pr_info("ARCH: SUN4M\n");
break;
case sun4d:
pr_info("ARCH: SUN4D\n");
break;
case sun4e:
pr_info("ARCH: SUN4E\n");
break;
case sun4u:
pr_info("ARCH: SUN4U\n");
break;
case sparc_leon:
pr_info("ARCH: LEON\n");
break;
default:
pr_info("ARCH: UNKNOWN!\n");
break;
}
idprom_init();
load_mmu();
phys_base = 0xffffffffUL;
highest_paddr = 0UL;
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
unsigned long top;
if (sp_banks[i].base_addr < phys_base)
phys_base = sp_banks[i].base_addr;
top = sp_banks[i].base_addr +
sp_banks[i].num_bytes;
if (highest_paddr < top)
highest_paddr = top;
}
pfn_base = phys_base >> PAGE_SHIFT;
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
#endif
prom_setsync(prom_sync_me);
if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) &&
((*(short *)linux_dbvec) != -1)) {
printk("Booted under KADB. Syncing trap table.\n");
(*(linux_dbvec->teach_debugger))();
}
/* Run-time patch instructions to match the cpu model */
per_cpu_patch();
paging_init();
smp_setup_cpu_possible_map();
}
extern int stop_a_enabled;
void sun_do_break(void)
{
if (!stop_a_enabled)
return;
printk("\n");
flush_user_windows();
prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);
int stop_a_enabled = 1;
static int __init topology_init(void)
{
int i, ncpus, err;
/* Count the number of physically present processors in
* the machine, even on uniprocessor, so that /proc/cpuinfo
* output is consistent with 2.4.x
*/
ncpus = 0;
while (!cpu_find_by_instance(ncpus, NULL, NULL))
ncpus++;
ncpus_probed = ncpus;
err = 0;
for_each_online_cpu(i) {
struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
err = -ENOMEM;
else
register_cpu(p, i);
}
return err;
}
subsys_initcall(topology_init);
#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
void __init arch_cpu_finalize_init(void)
{
cpu_data(0).udelay_val = loops_per_jiffy;
}
#endif
| linux-master | arch/sparc/kernel/setup_32.c |
// SPDX-License-Identifier: GPL-2.0
#define __32bit_syscall_numbers__
#include <linux/audit_arch.h>
#include <asm/unistd.h>
#include "kernel.h"
unsigned int sparc32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned int sparc32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned int sparc32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned int sparc32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned int sparc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int sparc32_classify_syscall(unsigned int syscall)
{
switch(syscall) {
case __NR_open:
return AUDITSC_OPEN;
case __NR_openat:
return AUDITSC_OPENAT;
case __NR_socketcall:
return AUDITSC_SOCKETCALL;
case __NR_execve:
return AUDITSC_EXECVE;
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
return AUDITSC_COMPAT;
}
}
| linux-master | arch/sparc/kernel/compat_audit.c |
// SPDX-License-Identifier: GPL-2.0
/* Sparc SS1000/SC2000 SMP support.
*
* Copyright (C) 1998 Jakub Jelinek ([email protected])
*
* Based on sun4m's smp.c, which is:
* Copyright (C) 1996 David S. Miller ([email protected])
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>
#include <asm/sbi.h>
#include <asm/mmu.h>
#include "kernel.h"
#include "irq.h"
#define IRQ_CROSS_CALL 15
static volatile int smp_processors_ready;
static int smp_highest_cpu;
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
__asm__ __volatile__("swap [%1], %0\n\t" :
"=&r" (val), "=&r" (ptr) :
"0" (val), "1" (ptr));
return val;
}
static void smp4d_ipi_init(void);
static unsigned char cpu_leds[32];
static inline void show_leds(int cpuid)
{
cpuid &= 0x1e;
__asm__ __volatile__ ("stba %0, [%1] %2" : :
"r" ((cpu_leds[cpuid] << 4) | cpu_leds[cpuid+1]),
"r" (ECSR_BASE(cpuid) | BB_LEDS),
"i" (ASI_M_CTL));
}
void sun4d_cpu_pre_starting(void *arg)
{
int cpuid = hard_smp_processor_id();
/* Show we are alive */
cpu_leds[cpuid] = 0x6;
show_leds(cpuid);
/* Enable level15 interrupt, disable level14 interrupt for now */
cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
}
void sun4d_cpu_pre_online(void *arg)
{
unsigned long flags;
int cpuid;
cpuid = hard_smp_processor_id();
	/* Unblock the master CPU _only_ when the scheduler state
	 * of all secondary CPUs is up-to-date, so that after
	 * SMP initialization the master is allowed
	 * to call the scheduler code.
	 */
sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
local_ops->cache_all();
local_ops->tlb_all();
while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
barrier();
while (current_set[cpuid]->cpu != cpuid)
barrier();
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t"
: : "r" (¤t_set[cpuid])
: "memory" /* paranoid */);
cpu_leds[cpuid] = 0x9;
show_leds(cpuid);
/* Attach to the address space of init_task. */
mmgrab(&init_mm);
current->active_mm = &init_mm;
local_ops->cache_all();
local_ops->tlb_all();
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
barrier();
spin_lock_irqsave(&sun4d_imsk_lock, flags);
cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}
/*
* Cycle through the processors asking the PROM to start each one.
*/
void __init smp4d_boot_cpus(void)
{
smp4d_ipi_init();
if (boot_cpu_id)
current_set[0] = NULL;
local_ops->cache_all();
}
int smp4d_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4d_cpu_startup;
int timeout;
int cpu_node;
cpu_find_by_instance(i, &cpu_node, NULL);
current_set[i] = task_thread_info(idle);
/*
* Initialize the contexts table
* Since the call to prom_startcpu() trashes the structure,
* we need to re-initialize it for each cpu
*/
smp_penguin_ctable.which_io = 0;
smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_ops->cache_all();
prom_startcpu(cpu_node,
&smp_penguin_ctable, 0, (char *)entry);
printk(KERN_INFO "prom_startcpu returned :)\n");
/* wheee... it's going... */
for (timeout = 0; timeout < 10000; timeout++) {
if (cpu_callin_map[i])
break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
local_ops->cache_all();
return 0;
}
void __init smp4d_smp_done(void)
{
int i, first;
int *prev;
/* setup cpu list for irq rotation */
first = 0;
prev = &first;
for_each_online_cpu(i) {
*prev = i;
prev = &cpu_data(i).next;
}
*prev = first;
local_ops->cache_all();
/* Ok, they are spinning and ready to go. */
smp_processors_ready = 1;
sun4d_distribute_irqs();
}
/* Memory structure giving interrupt handler information about IPI generated */
struct sun4d_ipi_work {
int single;
int msk;
int resched;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work);
/* Initialize IPIs on the SUN4D SMP machine */
static void __init smp4d_ipi_init(void)
{
int cpu;
struct sun4d_ipi_work *work;
printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ);
for_each_possible_cpu(cpu) {
work = &per_cpu(sun4d_ipi_work, cpu);
work->single = work->msk = work->resched = 0;
}
}
void sun4d_ipi_interrupt(void)
{
struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
if (work->single) {
work->single = 0;
smp_call_function_single_interrupt();
}
if (work->msk) {
work->msk = 0;
smp_call_function_interrupt();
}
if (work->resched) {
work->resched = 0;
smp_resched_interrupt();
}
}
/* +-------+-------------+-----------+------------------------------------+
* | bcast | devid | sid | levels mask |
* +-------+-------------+-----------+------------------------------------+
* 31 30 23 22 15 14 0
*/
#define IGEN_MESSAGE(bcast, devid, sid, levels) \
(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
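/* Worked example (illustrative, not part of the original file): for
 * cpu 2 and level 15, sun4d_send_ipi() below computes
 * IGEN_MESSAGE(0, 2 << 3, 6 + ((15 >> 1) & 7), 1 << 14), i.e.
 * devid == 16, sid == 13, levels mask == 0x4000.
 */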
static void sun4d_send_ipi(int cpu, int level)
{
cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
}
static void sun4d_ipi_single(int cpu)
{
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
/* Mark work */
work->single = 1;
/* Generate IRQ on the CPU */
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}
static void sun4d_ipi_mask_one(int cpu)
{
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
/* Mark work */
work->msk = 1;
/* Generate IRQ on the CPU */
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}
static void sun4d_ipi_resched(int cpu)
{
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
/* Mark work */
work->resched = 1;
/* Generate IRQ on the CPU (any IRQ will cause resched) */
sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}
static struct smp_funcall {
void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
unsigned char processors_in[NR_CPUS]; /* Set when ipi entered. */
unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
static void sun4d_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
if (smp_processors_ready) {
register int high = smp_highest_cpu;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
{
/*
* If you make changes here, make sure
* gcc generates proper code...
*/
register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
register unsigned long a4 asm("i4") = arg4;
register unsigned long a5 asm("i5") = 0;
__asm__ __volatile__(
"std %0, [%6]\n\t"
"std %2, [%6 + 8]\n\t"
"std %4, [%6 + 16]\n\t" : :
"r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
"r" (&ccall_info.func));
}
/* Init receive/complete mapping, plus fire the IPI's off. */
{
register int i;
cpumask_clear_cpu(smp_processor_id(), &mask);
cpumask_and(&mask, cpu_online_mask, &mask);
for (i = 0; i <= high; i++) {
if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
sun4d_send_ipi(i, IRQ_CROSS_CALL);
}
}
}
{
register int i;
i = 0;
do {
if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_in[i])
barrier();
} while (++i <= high);
i = 0;
do {
if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_out[i])
barrier();
} while (++i <= high);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
}
}
/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
unsigned long) = ccall_info.func;
int i = hard_smp_processor_id();
ccall_info.processors_in[i] = 1;
func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
int cpu = hard_smp_processor_id();
struct clock_event_device *ce;
static int cpu_tick[NR_CPUS];
static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
old_regs = set_irq_regs(regs);
bw_get_prof_limit(cpu);
bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */
cpu_tick[cpu]++;
if (!(cpu_tick[cpu] & 15)) {
if (cpu_tick[cpu] == 0x60)
cpu_tick[cpu] = 0;
cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
show_leds(cpu);
}
ce = &per_cpu(sparc32_clockevent, cpu);
irq_enter();
ce->event_handler(ce);
irq_exit();
set_irq_regs(old_regs);
}
static const struct sparc32_ipi_ops sun4d_ipi_ops = {
.cross_call = sun4d_cross_call,
.resched = sun4d_ipi_resched,
.single = sun4d_ipi_single,
.mask_one = sun4d_ipi_mask_one,
};
void __init sun4d_init_smp(void)
{
int i;
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
sparc32_ipi_ops = &sun4d_ipi_ops;
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
}
}
| linux-master | arch/sparc/kernel/sun4d_smp.c |
// SPDX-License-Identifier: GPL-2.0
/* time.c: UltraSparc timer and TOD clock support.
*
* Copyright (C) 1997, 2008 David S. Miller ([email protected])
* Copyright (C) 1998 Eddie C. Dost ([email protected])
*
* Based largely on code which is:
*
* Copyright (C) 1996 Thomas K. Dyas ([email protected])
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/bcd.h>
#include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/rtc/m48t59.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/starfire.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
#include <asm/cacheflush.h>
#include "entry.h"
#include "kernel.h"
DEFINE_SPINLOCK(rtc_lock);
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
if (in_lock_functions(pc))
return regs->u_regs[UREG_RETPC];
return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
static void tick_disable_protection(void)
{
/* Set things up so user can access tick register for profiling
* purposes. Also workaround BB_ERRATA_1 by doing a dummy
* read back of %tick after writing it.
*/
__asm__ __volatile__(
" ba,pt %%xcc, 1f\n"
" nop\n"
" .align 64\n"
"1: rd %%tick, %%g2\n"
" add %%g2, 6, %%g2\n"
" andn %%g2, %0, %%g2\n"
" wrpr %%g2, 0, %%tick\n"
" rdpr %%tick, %%g0"
: /* no outputs */
: "r" (TICK_PRIV_BIT)
: "g2");
}
static void tick_disable_irq(void)
{
__asm__ __volatile__(
" ba,pt %%xcc, 1f\n"
" nop\n"
" .align 64\n"
"1: wr %0, 0x0, %%tick_cmpr\n"
" rd %%tick_cmpr, %%g0"
: /* no outputs */
: "r" (TICKCMP_IRQ_BIT));
}
static void tick_init_tick(void)
{
tick_disable_protection();
tick_disable_irq();
}
static unsigned long long tick_get_tick(void)
{
unsigned long ret;
__asm__ __volatile__("rd %%tick, %0\n\t"
"mov %0, %0"
: "=r" (ret));
return ret & ~TICK_PRIV_BIT;
}
static int tick_add_compare(unsigned long adj)
{
unsigned long orig_tick, new_tick, new_compare;
__asm__ __volatile__("rd %%tick, %0"
: "=r" (orig_tick));
orig_tick &= ~TICKCMP_IRQ_BIT;
/* Workaround for Spitfire Errata (#54 I think??), I discovered
* this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
* number 103640.
*
* On Blackbird writes to %tick_cmpr can fail, the
* workaround seems to be to execute the wr instruction
* at the start of an I-cache line, and perform a dummy
* read back from %tick_cmpr right after writing to it. -DaveM
*/
__asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
" add %1, %2, %0\n\t"
".align 64\n"
"1:\n\t"
"wr %0, 0, %%tick_cmpr\n\t"
"rd %%tick_cmpr, %%g0\n\t"
: "=r" (new_compare)
: "r" (orig_tick), "r" (adj));
__asm__ __volatile__("rd %%tick, %0"
: "=r" (new_tick));
new_tick &= ~TICKCMP_IRQ_BIT;
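	/* Nonzero (treated as an error by the caller) when %tick has
	 * already advanced past the newly written compare value, i.e.
	 * the requested deadline was missed.
	 */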
return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
static unsigned long tick_add_tick(unsigned long adj)
{
unsigned long new_tick;
/* Also need to handle Blackbird bug here too. */
__asm__ __volatile__("rd %%tick, %0\n\t"
"add %0, %1, %0\n\t"
"wrpr %0, 0, %%tick\n\t"
: "=&r" (new_tick)
: "r" (adj));
return new_tick;
}
/* Search the OpenBoot device tree for the clock frequency of the cpu with the given cpuid. */
static unsigned long cpuid_to_freq(phandle node, int cpuid)
{
bool is_cpu_node = false;
unsigned long freq = 0;
char type[128];
if (!node)
return freq;
if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
is_cpu_node = (strcmp(type, "cpu") == 0);
	/* Try "upa-portid" first, then "cpuid", to match the cpu id; see prom_64.c. */
if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
prom_getint(node, "cpuid") == cpuid))
freq = prom_getintdefault(node, "clock-frequency", 0);
if (!freq)
freq = cpuid_to_freq(prom_getchild(node), cpuid);
if (!freq)
freq = cpuid_to_freq(prom_getsibling(node), cpuid);
return freq;
}
static unsigned long tick_get_frequency(void)
{
return cpuid_to_freq(prom_root_node, hard_smp_processor_id());
}
static struct sparc64_tick_ops tick_operations __cacheline_aligned = {
.name = "tick",
.init_tick = tick_init_tick,
.disable_irq = tick_disable_irq,
.get_tick = tick_get_tick,
.add_tick = tick_add_tick,
.add_compare = tick_add_compare,
.get_frequency = tick_get_frequency,
.softint_mask = 1UL << 0,
};
struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
EXPORT_SYMBOL(tick_ops);
static void stick_disable_irq(void)
{
__asm__ __volatile__(
"wr %0, 0x0, %%asr25"
: /* no outputs */
: "r" (TICKCMP_IRQ_BIT));
}
static void stick_init_tick(void)
{
/* Writes to the %tick and %stick register are not
* allowed on sun4v. The Hypervisor controls that
* bit, per-strand.
*/
if (tlb_type != hypervisor) {
tick_disable_protection();
tick_disable_irq();
/* Let the user get at STICK too. */
__asm__ __volatile__(
" rd %%asr24, %%g2\n"
" andn %%g2, %0, %%g2\n"
" wr %%g2, 0, %%asr24"
: /* no outputs */
: "r" (TICK_PRIV_BIT)
: "g1", "g2");
}
stick_disable_irq();
}
static unsigned long long stick_get_tick(void)
{
unsigned long ret;
__asm__ __volatile__("rd %%asr24, %0"
: "=r" (ret));
return ret & ~TICK_PRIV_BIT;
}
static unsigned long stick_add_tick(unsigned long adj)
{
unsigned long new_tick;
__asm__ __volatile__("rd %%asr24, %0\n\t"
"add %0, %1, %0\n\t"
"wr %0, 0, %%asr24\n\t"
: "=&r" (new_tick)
: "r" (adj));
return new_tick;
}
static int stick_add_compare(unsigned long adj)
{
unsigned long orig_tick, new_tick;
__asm__ __volatile__("rd %%asr24, %0"
: "=r" (orig_tick));
orig_tick &= ~TICKCMP_IRQ_BIT;
__asm__ __volatile__("wr %0, 0, %%asr25"
: /* no outputs */
: "r" (orig_tick + adj));
__asm__ __volatile__("rd %%asr24, %0"
: "=r" (new_tick));
new_tick &= ~TICKCMP_IRQ_BIT;
return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
static unsigned long stick_get_frequency(void)
{
return prom_getintdefault(prom_root_node, "stick-frequency", 0);
}
static struct sparc64_tick_ops stick_operations __read_mostly = {
.name = "stick",
.init_tick = stick_init_tick,
.disable_irq = stick_disable_irq,
.get_tick = stick_get_tick,
.add_tick = stick_add_tick,
.add_compare = stick_add_compare,
.get_frequency = stick_get_frequency,
.softint_mask = 1UL << 16,
};
/* On Hummingbird the STICK/STICK_CMPR register is implemented
* in I/O space. There are two 64-bit registers each, the
* first holds the low 32-bits of the value and the second holds
* the high 32-bits.
*
* Since STICK is constantly updating, we have to access it carefully.
*
* The sequence we use to read is:
* 1) read high
* 2) read low
 * 3) read high again; if it changed, re-read both low and high.
*
* Writing STICK safely is also tricky:
* 1) write low to zero
* 2) write high
* 3) write low
*/
static unsigned long __hbird_read_stick(void)
{
unsigned long ret, tmp1, tmp2, tmp3;
unsigned long addr = HBIRD_STICK_ADDR+8;
__asm__ __volatile__("ldxa [%1] %5, %2\n"
"1:\n\t"
"sub %1, 0x8, %1\n\t"
"ldxa [%1] %5, %3\n\t"
"add %1, 0x8, %1\n\t"
"ldxa [%1] %5, %4\n\t"
"cmp %4, %2\n\t"
"bne,a,pn %%xcc, 1b\n\t"
" mov %4, %2\n\t"
"sllx %4, 32, %4\n\t"
"or %3, %4, %0\n\t"
: "=&r" (ret), "=&r" (addr),
"=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
: "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));
return ret;
}
static void __hbird_write_stick(unsigned long val)
{
unsigned long low = (val & 0xffffffffUL);
unsigned long high = (val >> 32UL);
unsigned long addr = HBIRD_STICK_ADDR;
__asm__ __volatile__("stxa %%g0, [%0] %4\n\t"
"add %0, 0x8, %0\n\t"
"stxa %3, [%0] %4\n\t"
"sub %0, 0x8, %0\n\t"
"stxa %2, [%0] %4"
: "=&r" (addr)
: "0" (addr), "r" (low), "r" (high),
"i" (ASI_PHYS_BYPASS_EC_E));
}
static void __hbird_write_compare(unsigned long val)
{
unsigned long low = (val & 0xffffffffUL);
unsigned long high = (val >> 32UL);
unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;
__asm__ __volatile__("stxa %3, [%0] %4\n\t"
"sub %0, 0x8, %0\n\t"
"stxa %2, [%0] %4"
: "=&r" (addr)
: "0" (addr), "r" (low), "r" (high),
"i" (ASI_PHYS_BYPASS_EC_E));
}
static void hbtick_disable_irq(void)
{
__hbird_write_compare(TICKCMP_IRQ_BIT);
}
static void hbtick_init_tick(void)
{
tick_disable_protection();
/* XXX This seems to be necessary to 'jumpstart' Hummingbird
* XXX into actually sending STICK interrupts. I think because
* XXX of how we store %tick_cmpr in head.S this somehow resets the
* XXX {TICK + STICK} interrupt mux. -DaveM
*/
__hbird_write_stick(__hbird_read_stick());
hbtick_disable_irq();
}
static unsigned long long hbtick_get_tick(void)
{
return __hbird_read_stick() & ~TICK_PRIV_BIT;
}
static unsigned long hbtick_add_tick(unsigned long adj)
{
unsigned long val;
val = __hbird_read_stick() + adj;
__hbird_write_stick(val);
return val;
}
static int hbtick_add_compare(unsigned long adj)
{
unsigned long val = __hbird_read_stick();
unsigned long val2;
val &= ~TICKCMP_IRQ_BIT;
val += adj;
__hbird_write_compare(val);
val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
return ((long)(val2 - val)) > 0L;
}
static unsigned long hbtick_get_frequency(void)
{
return prom_getintdefault(prom_root_node, "stick-frequency", 0);
}
static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.name = "hbtick",
.init_tick = hbtick_init_tick,
.disable_irq = hbtick_disable_irq,
.get_tick = hbtick_get_tick,
.add_tick = hbtick_add_tick,
.add_compare = hbtick_add_compare,
.get_frequency = hbtick_get_frequency,
.softint_mask = 1UL << 0,
};
unsigned long cmos_regs;
EXPORT_SYMBOL(cmos_regs);
static struct resource rtc_cmos_resource;
static struct platform_device rtc_cmos_device = {
.name = "rtc_cmos",
.id = -1,
.resource = &rtc_cmos_resource,
.num_resources = 1,
};
static int rtc_probe(struct platform_device *op)
{
struct resource *r;
printk(KERN_INFO "%pOF: RTC regs at 0x%llx\n",
op->dev.of_node, op->resource[0].start);
/* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
* up a fake resource so that the probe works for all cases.
* When the RTC is behind an ISA bus it will have IORESOURCE_IO
 * already, whereas when it's behind EBUS it will be IORESOURCE_MEM.
*/
r = &rtc_cmos_resource;
r->flags = IORESOURCE_IO;
r->name = op->resource[0].name;
r->start = op->resource[0].start;
r->end = op->resource[0].end;
cmos_regs = op->resource[0].start;
return platform_device_register(&rtc_cmos_device);
}
static const struct of_device_id rtc_match[] = {
{
.name = "rtc",
.compatible = "m5819",
},
{
.name = "rtc",
.compatible = "isa-m5819p",
},
{
.name = "rtc",
.compatible = "isa-m5823p",
},
{
.name = "rtc",
.compatible = "ds1287",
},
{},
};
static struct platform_driver rtc_driver = {
.probe = rtc_probe,
.driver = {
.name = "rtc",
.of_match_table = rtc_match,
},
};
static struct platform_device rtc_bq4802_device = {
.name = "rtc-bq4802",
.id = -1,
.num_resources = 1,
};
static int bq4802_probe(struct platform_device *op)
{
printk(KERN_INFO "%pOF: BQ4802 regs at 0x%llx\n",
op->dev.of_node, op->resource[0].start);
rtc_bq4802_device.resource = &op->resource[0];
return platform_device_register(&rtc_bq4802_device);
}
static const struct of_device_id bq4802_match[] = {
{
.name = "rtc",
.compatible = "bq4802",
},
{},
};
static struct platform_driver bq4802_driver = {
.probe = bq4802_probe,
.driver = {
.name = "bq4802",
.of_match_table = bq4802_match,
},
};
static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
{
struct platform_device *pdev = to_platform_device(dev);
void __iomem *regs = (void __iomem *) pdev->resource[0].start;
return readb(regs + ofs);
}
static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
{
struct platform_device *pdev = to_platform_device(dev);
void __iomem *regs = (void __iomem *) pdev->resource[0].start;
writeb(val, regs + ofs);
}
static struct m48t59_plat_data m48t59_data = {
.read_byte = mostek_read_byte,
.write_byte = mostek_write_byte,
};
static struct platform_device m48t59_rtc = {
.name = "rtc-m48t59",
.id = 0,
.num_resources = 1,
.dev = {
.platform_data = &m48t59_data,
},
};
static int mostek_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
/* On an Enterprise system there can be multiple mostek clocks.
* We should only match the one that is on the central FHC bus.
*/
if (of_node_name_eq(dp->parent, "fhc") &&
!of_node_name_eq(dp->parent->parent, "central"))
return -ENODEV;
printk(KERN_INFO "%pOF: Mostek regs at 0x%llx\n",
dp, op->resource[0].start);
m48t59_rtc.resource = &op->resource[0];
return platform_device_register(&m48t59_rtc);
}
static const struct of_device_id mostek_match[] = {
{
.name = "eeprom",
},
{},
};
static struct platform_driver mostek_driver = {
.probe = mostek_probe,
.driver = {
.name = "mostek",
.of_match_table = mostek_match,
},
};
static struct platform_device rtc_sun4v_device = {
.name = "rtc-sun4v",
.id = -1,
};
static struct platform_device rtc_starfire_device = {
.name = "rtc-starfire",
.id = -1,
};
static int __init clock_init(void)
{
if (this_is_starfire)
return platform_device_register(&rtc_starfire_device);
if (tlb_type == hypervisor)
return platform_device_register(&rtc_sun4v_device);
(void) platform_driver_register(&rtc_driver);
(void) platform_driver_register(&mostek_driver);
(void) platform_driver_register(&bq4802_driver);
return 0;
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the RTC driver
* need to see the clock registers.
*/
fs_initcall(clock_init);
/* Return true if this is Hummingbird, aka Ultra-IIe */
static bool is_hummingbird(void)
{
unsigned long ver, manuf, impl;
__asm__ __volatile__ ("rdpr %%ver, %0"
: "=&r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
return (manuf == 0x17 && impl == 0x13);
}
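/* Worked example (illustrative, not part of the original file): %ver
 * packs the manufacturer in bits 63:48 and the implementation in bits
 * 47:32, so a Hummingbird reads back manuf == 0x17 and impl == 0x13.
 */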
struct freq_table {
unsigned long clock_tick_ref;
unsigned int ref_freq;
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };
unsigned long sparc64_get_clock_tick(unsigned int cpu)
{
struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
if (ft->clock_tick_ref)
return ft->clock_tick_ref;
return cpu_data(cpu).clock_tick;
}
EXPORT_SYMBOL(sparc64_get_clock_tick);
#ifdef CONFIG_CPU_FREQ
static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
unsigned int cpu;
struct freq_table *ft;
for_each_cpu(cpu, freq->policy->cpus) {
ft = &per_cpu(sparc64_freq_table, cpu);
if (!ft->ref_freq) {
ft->ref_freq = freq->old;
ft->clock_tick_ref = cpu_data(cpu).clock_tick;
}
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
cpu_data(cpu).clock_tick =
cpufreq_scale(ft->clock_tick_ref, ft->ref_freq,
freq->new);
}
}
return 0;
}
static struct notifier_block sparc64_cpufreq_notifier_block = {
.notifier_call = sparc64_cpufreq_notifier
};
static int __init register_sparc64_cpufreq_notifier(void)
{
cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
return 0;
}
core_initcall(register_sparc64_cpufreq_notifier);
#endif /* CONFIG_CPU_FREQ */
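/* Illustrative note, not part of the original file: cpufreq_scale()
 * above rescales the reference tick rate proportionally, roughly
 * clock_tick = clock_tick_ref * new_freq / ref_freq; e.g. halving the
 * frequency halves the reported clock_tick.
 */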
static int sparc64_next_event(unsigned long delta,
struct clock_event_device *evt)
{
return tick_operations.add_compare(delta) ? -ETIME : 0;
}
static int sparc64_timer_shutdown(struct clock_event_device *evt)
{
tick_operations.disable_irq();
return 0;
}
static struct clock_event_device sparc64_clockevent = {
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = sparc64_timer_shutdown,
.set_next_event = sparc64_next_event,
.rating = 100,
.shift = 30,
.irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long tick_mask = tick_operations.softint_mask;
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
clear_softint(tick_mask);
irq_enter();
local_cpu_data().irq0_irqs++;
kstat_incr_irq_this_cpu(0);
if (unlikely(!evt->event_handler)) {
printk(KERN_WARNING
"Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
} else
evt->event_handler(evt);
irq_exit();
set_irq_regs(old_regs);
}
void setup_sparc64_timer(void)
{
struct clock_event_device *sevt;
unsigned long pstate;
/* Guarantee that the following sequences execute
* uninterrupted.
*/
__asm__ __volatile__("rdpr %%pstate, %0\n\t"
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
tick_operations.init_tick();
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: /* no outputs */
: "r" (pstate));
sevt = this_cpu_ptr(&sparc64_events);
memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
sevt->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(sevt);
}
#define SPARC64_NSEC_PER_CYC_SHIFT 10UL
static struct clocksource clocksource_tick = {
.rating = 100,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
{
unsigned long bclock = get_tick();
while ((get_tick() - bclock) < loops)
;
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
static u64 clocksource_tick_read(struct clocksource *cs)
{
return get_tick();
}
static void __init get_tick_patch(void)
{
unsigned int *addr, *instr, i;
struct get_tick_patch *p;
if (tlb_type == spitfire && is_hummingbird())
return;
for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
instr = (tlb_type == spitfire) ? p->tick : p->stick;
addr = (unsigned int *)(unsigned long)p->addr;
for (i = 0; i < GET_TICK_NINSTR; i++) {
addr[i] = instr[i];
/* ensure that address is modified before flush */
wmb();
flushi(&addr[i]);
}
}
}
static void __init init_tick_ops(struct sparc64_tick_ops *ops)
{
unsigned long freq, quotient, tick;
freq = ops->get_frequency();
quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
tick = ops->get_tick();
ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
ops->ticks_per_nsec_quotient = quotient;
ops->frequency = freq;
tick_operations = *ops;
get_tick_patch();
}
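/* Illustrative sketch, not part of the original file: the fixed-point
 * setup above.  clocksource_hz2mult(freq, 10) yields roughly
 * (10^9 << 10) / freq, so sched_clock() below converts ticks to
 * nanoseconds with one multiply and one shift.  Names are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static unsigned long long example_ticks_to_ns(unsigned long tick,
					      unsigned long quotient)
{
	return ((unsigned long long)tick * quotient) >>
		SPARC64_NSEC_PER_CYC_SHIFT;
	/* e.g. freq == 1 GHz gives quotient ~1024, so 1000 ticks ~1000 ns */
}
#endif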
void __init time_init_early(void)
{
if (tlb_type == spitfire) {
if (is_hummingbird()) {
init_tick_ops(&hbtick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
} else {
init_tick_ops(&tick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
}
} else {
init_tick_ops(&stick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
}
}
void __init time_init(void)
{
unsigned long freq;
freq = tick_operations.frequency;
tb_ticks_per_usec = freq / USEC_PER_SEC;
clocksource_tick.name = tick_operations.name;
clocksource_tick.read = clocksource_tick_read;
clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
sparc64_clockevent.name = tick_operations.name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
sparc64_clockevent.max_delta_ticks = 0x7fffffffffffffffUL;
sparc64_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &sparc64_clockevent);
sparc64_clockevent.min_delta_ticks = 0xF;
printk("clockevent: mult[%x] shift[%d]\n",
sparc64_clockevent.mult, sparc64_clockevent.shift);
setup_sparc64_timer();
}
unsigned long long sched_clock(void)
{
unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
unsigned long offset = tick_operations.offset;
/* Use barrier so the compiler emits the loads first and overlaps load
* latency with reading tick, because reading %tick/%stick is a
* post-sync instruction that will flush and restart subsequent
* instructions after it commits.
*/
barrier();
return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
}
int read_current_timer(unsigned long *timer_val)
{
*timer_val = get_tick();
return 0;
}
| linux-master | arch/sparc/kernel/time_64.c |
// SPDX-License-Identifier: GPL-2.0
/* cpu.c: Dinky routines to look for the kind of Sparc cpu
* we are on.
*
* Copyright (C) 1996 David S. Miller ([email protected])
*/
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/pgtable.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/head.h>
#include <asm/psr.h>
#include <asm/mbus.h>
#include <asm/cpudata.h>
#include "kernel.h"
#include "entry.h"
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
EXPORT_PER_CPU_SYMBOL(__cpu_data);
int ncpus_probed;
unsigned int fsr_storage;
struct cpu_info {
int psr_vers;
const char *name;
const char *pmu_name;
};
struct fpu_info {
int fp_vers;
const char *name;
};
#define NOCPU 8
#define NOFPU 8
struct manufacturer_info {
int psr_impl;
struct cpu_info cpu_info[NOCPU];
struct fpu_info fpu_info[NOFPU];
};
#define CPU(ver, _name) \
{ .psr_vers = ver, .name = _name }
#define CPU_PMU(ver, _name, _pmu_name) \
{ .psr_vers = ver, .name = _name, .pmu_name = _pmu_name }
#define FPU(ver, _name) \
{ .fp_vers = ver, .name = _name }
static const struct manufacturer_info __initconst manufacturer_info[] = {
{
0,
/* Sun4/100, 4/200, SLC */
.cpu_info = {
CPU(0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"),
			/* born as STP1012PGA */
CPU(4, "Fujitsu MB86904"),
CPU(5, "Fujitsu TurboSparc MB86907"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0, "Fujitsu MB86910 or Weitek WTL1164/5"),
FPU(1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"),
FPU(2, "LSI Logic L64802 or Texas Instruments ACT8847"),
/* SparcStation SLC, SparcStation1 */
FPU(3, "Weitek WTL3170/2"),
/* SPARCstation-5 */
FPU(4, "Lsi Logic/Meiko L64804 or compatible"),
FPU(-1, NULL)
}
},{
1,
.cpu_info = {
/* SparcStation2, SparcServer 490 & 690 */
CPU(0, "LSI Logic Corporation - L64811"),
/* SparcStation2 */
CPU(1, "Cypress/ROSS CY7C601"),
/* Embedded controller */
CPU(3, "Cypress/ROSS CY7C611"),
/* Ross Technologies HyperSparc */
CPU(0xf, "ROSS HyperSparc RT620"),
CPU(0xe, "ROSS HyperSparc RT625 or RT626"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0, "ROSS HyperSparc combined IU/FPU"),
FPU(1, "Lsi Logic L64814"),
FPU(2, "Texas Instruments TMS390-C602A"),
FPU(3, "Cypress CY7C602 FPU"),
FPU(-1, NULL)
}
},{
2,
.cpu_info = {
/* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
/* Someone please write the code to support this beast! ;) */
CPU(0, "Bipolar Integrated Technology - B5010"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
3,
.cpu_info = {
CPU(0, "LSI Logic Corporation - unknown-type"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
PSR_IMPL_TI,
.cpu_info = {
CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
			/* SparcClassic -- born as STP1010TAB-50 */
CPU(1, "Texas Instruments, Inc. - MicroSparc"),
CPU(2, "Texas Instruments, Inc. - MicroSparc II"),
CPU(3, "Texas Instruments, Inc. - SuperSparc 51"),
CPU(4, "Texas Instruments, Inc. - SuperSparc 61"),
CPU(5, "Texas Instruments, Inc. - unknown"),
CPU(-1, NULL)
},
.fpu_info = {
/* SuperSparc 50 module */
FPU(0, "SuperSparc on-chip FPU"),
/* SparcClassic */
FPU(4, "TI MicroSparc on chip FPU"),
FPU(-1, NULL)
}
},{
5,
.cpu_info = {
CPU(0, "Matsushita - MN10501"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0, "Matsushita MN10501"),
FPU(-1, NULL)
}
},{
6,
.cpu_info = {
CPU(0, "Philips Corporation - unknown"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
7,
.cpu_info = {
CPU(0, "Harvest VLSI Design Center, Inc. - unknown"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
8,
.cpu_info = {
CPU(0, "Systems and Processes Engineering Corporation (SPEC)"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
9,
.cpu_info = {
/* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
CPU(0, "Fujitsu or Weitek Power-UP"),
CPU(1, "Fujitsu or Weitek Power-UP"),
CPU(2, "Fujitsu or Weitek Power-UP"),
CPU(3, "Fujitsu or Weitek Power-UP"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(3, "Fujitsu or Weitek on-chip FPU"),
FPU(-1, NULL)
}
},{
PSR_IMPL_LEON, /* Aeroflex Gaisler */
.cpu_info = {
CPU(3, "LEON"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(2, "GRFPU"),
FPU(3, "GRFPU-Lite"),
FPU(-1, NULL)
}
},{
0x17,
.cpu_info = {
CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"),
CPU_PMU(0x11, "TI UltraSparc II (BlackBird)", "ultra12"),
CPU_PMU(0x12, "TI UltraSparc IIi (Sabre)", "ultra12"),
CPU_PMU(0x13, "TI UltraSparc IIe (Hummingbird)", "ultra12"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0x10, "UltraSparc I integrated FPU"),
FPU(0x11, "UltraSparc II integrated FPU"),
FPU(0x12, "UltraSparc IIi integrated FPU"),
FPU(0x13, "UltraSparc IIe integrated FPU"),
FPU(-1, NULL)
}
},{
0x22,
.cpu_info = {
CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0x10, "UltraSparc I integrated FPU"),
FPU(-1, NULL)
}
},{
0x3e,
.cpu_info = {
CPU_PMU(0x14, "TI UltraSparc III (Cheetah)", "ultra3"),
CPU_PMU(0x15, "TI UltraSparc III+ (Cheetah+)", "ultra3+"),
CPU_PMU(0x16, "TI UltraSparc IIIi (Jalapeno)", "ultra3i"),
CPU_PMU(0x18, "TI UltraSparc IV (Jaguar)", "ultra3+"),
CPU_PMU(0x19, "TI UltraSparc IV+ (Panther)", "ultra4+"),
CPU_PMU(0x22, "TI UltraSparc IIIi+ (Serrano)", "ultra3i"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0x14, "UltraSparc III integrated FPU"),
FPU(0x15, "UltraSparc III+ integrated FPU"),
FPU(0x16, "UltraSparc IIIi integrated FPU"),
FPU(0x18, "UltraSparc IV integrated FPU"),
FPU(0x19, "UltraSparc IV+ integrated FPU"),
FPU(0x22, "UltraSparc IIIi+ integrated FPU"),
FPU(-1, NULL)
}
}};
/* In order to get the fpu type correct, you need to take the IDPROM's
* machine type value into consideration too. I will fix this.
*/
static const char *sparc_cpu_type;
static const char *sparc_fpu_type;
const char *sparc_pmu_type;
static void __init set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
{
const struct manufacturer_info *manuf;
int i;
sparc_cpu_type = NULL;
sparc_fpu_type = NULL;
sparc_pmu_type = NULL;
manuf = NULL;
for (i = 0; i < ARRAY_SIZE(manufacturer_info); i++)
{
if (psr_impl == manufacturer_info[i].psr_impl) {
manuf = &manufacturer_info[i];
break;
}
}
if (manuf != NULL)
{
const struct cpu_info *cpu;
const struct fpu_info *fpu;
cpu = &manuf->cpu_info[0];
while (cpu->psr_vers != -1)
{
if (cpu->psr_vers == psr_vers) {
sparc_cpu_type = cpu->name;
sparc_pmu_type = cpu->pmu_name;
sparc_fpu_type = "No FPU";
break;
}
cpu++;
}
fpu = &manuf->fpu_info[0];
while (fpu->fp_vers != -1)
{
if (fpu->fp_vers == fpu_vers) {
sparc_fpu_type = fpu->name;
break;
}
fpu++;
}
}
if (sparc_cpu_type == NULL)
{
printk(KERN_ERR "CPU: Unknown chip, impl[0x%x] vers[0x%x]\n",
psr_impl, psr_vers);
sparc_cpu_type = "Unknown CPU";
}
if (sparc_fpu_type == NULL)
{
printk(KERN_ERR "FPU: Unknown chip, impl[0x%x] vers[0x%x]\n",
psr_impl, fpu_vers);
sparc_fpu_type = "Unknown FPU";
}
if (sparc_pmu_type == NULL)
sparc_pmu_type = "Unknown PMU";
}
#ifdef CONFIG_SPARC32
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
seq_printf(m,
"cpu\t\t: %s\n"
"fpu\t\t: %s\n"
"promlib\t\t: Version %d Revision %d\n"
"prom\t\t: %d.%d\n"
"type\t\t: %s\n"
"ncpus probed\t: %d\n"
"ncpus active\t: %d\n"
#ifndef CONFIG_SMP
"CPU0Bogo\t: %lu.%02lu\n"
"CPU0ClkTck\t: %ld\n"
#endif
,
sparc_cpu_type,
sparc_fpu_type ,
romvec->pv_romvers,
prom_rev,
romvec->pv_printrev >> 16,
romvec->pv_printrev & 0xffff,
&cputypval[0],
ncpus_probed,
num_online_cpus()
#ifndef CONFIG_SMP
, cpu_data(0).udelay_val/(500000/HZ),
(cpu_data(0).udelay_val/(5000/HZ)) % 100,
cpu_data(0).clock_tick
#endif
);
#ifdef CONFIG_SMP
smp_bogo(m);
#endif
mmu_info(m);
#ifdef CONFIG_SMP
smp_info(m);
#endif
return 0;
}
#endif /* CONFIG_SPARC32 */
#ifdef CONFIG_SPARC64
unsigned int dcache_parity_tl1_occurred;
unsigned int icache_parity_tl1_occurred;
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
seq_printf(m,
"cpu\t\t: %s\n"
"fpu\t\t: %s\n"
"pmu\t\t: %s\n"
"prom\t\t: %s\n"
"type\t\t: %s\n"
"ncpus probed\t: %d\n"
"ncpus active\t: %d\n"
"D$ parity tl1\t: %u\n"
"I$ parity tl1\t: %u\n"
#ifndef CONFIG_SMP
"Cpu0ClkTck\t: %016lx\n"
#endif
,
sparc_cpu_type,
sparc_fpu_type,
sparc_pmu_type,
prom_version,
((tlb_type == hypervisor) ?
"sun4v" :
"sun4u"),
ncpus_probed,
num_online_cpus(),
dcache_parity_tl1_occurred,
icache_parity_tl1_occurred
#ifndef CONFIG_SMP
, cpu_data(0).clock_tick
#endif
);
cpucap_info(m);
#ifdef CONFIG_SMP
smp_bogo(m);
#endif
mmu_info(m);
#ifdef CONFIG_SMP
smp_info(m);
#endif
return 0;
}
#endif /* CONFIG_SPARC64 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
/* The pointer we are returning is arbitrary,
* it just has to be non-NULL and not IS_ERR
* in the success case.
*/
return *pos == 0 ? &c_start : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start =c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#ifdef CONFIG_SPARC32
static int __init cpu_type_probe(void)
{
int psr_impl, psr_vers, fpu_vers;
int psr;
psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK);
psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK);
psr = get_psr();
put_psr(psr | PSR_EF);
if (psr_impl == PSR_IMPL_LEON)
fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
else
fpu_vers = ((get_fsr() >> 17) & 0x7);
put_psr(psr);
set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
return 0;
}
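/* Worked example (illustrative, not part of the original file): %psr
 * packs the implementation in bits 31:28 and the version in bits
 * 27:24, so a psr whose top nibble is 0x4 decodes as
 * psr_impl == PSR_IMPL_TI.
 */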
#endif /* CONFIG_SPARC32 */
#ifdef CONFIG_SPARC64
static void __init sun4v_cpu_probe(void)
{
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
sparc_cpu_type = "UltraSparc T1 (Niagara)";
sparc_fpu_type = "UltraSparc T1 integrated FPU";
sparc_pmu_type = "niagara";
break;
case SUN4V_CHIP_NIAGARA2:
sparc_cpu_type = "UltraSparc T2 (Niagara2)";
sparc_fpu_type = "UltraSparc T2 integrated FPU";
sparc_pmu_type = "niagara2";
break;
case SUN4V_CHIP_NIAGARA3:
sparc_cpu_type = "UltraSparc T3 (Niagara3)";
sparc_fpu_type = "UltraSparc T3 integrated FPU";
sparc_pmu_type = "niagara3";
break;
case SUN4V_CHIP_NIAGARA4:
sparc_cpu_type = "UltraSparc T4 (Niagara4)";
sparc_fpu_type = "UltraSparc T4 integrated FPU";
sparc_pmu_type = "niagara4";
break;
case SUN4V_CHIP_NIAGARA5:
sparc_cpu_type = "UltraSparc T5 (Niagara5)";
sparc_fpu_type = "UltraSparc T5 integrated FPU";
sparc_pmu_type = "niagara5";
break;
case SUN4V_CHIP_SPARC_M6:
sparc_cpu_type = "SPARC-M6";
sparc_fpu_type = "SPARC-M6 integrated FPU";
sparc_pmu_type = "sparc-m6";
break;
case SUN4V_CHIP_SPARC_M7:
sparc_cpu_type = "SPARC-M7";
sparc_fpu_type = "SPARC-M7 integrated FPU";
sparc_pmu_type = "sparc-m7";
break;
case SUN4V_CHIP_SPARC_M8:
sparc_cpu_type = "SPARC-M8";
sparc_fpu_type = "SPARC-M8 integrated FPU";
sparc_pmu_type = "sparc-m8";
break;
case SUN4V_CHIP_SPARC_SN:
sparc_cpu_type = "SPARC-SN";
sparc_fpu_type = "SPARC-SN integrated FPU";
sparc_pmu_type = "sparc-sn";
break;
case SUN4V_CHIP_SPARC64X:
sparc_cpu_type = "SPARC64-X";
sparc_fpu_type = "SPARC64-X integrated FPU";
sparc_pmu_type = "sparc64-x";
break;
default:
printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
prom_cpu_compatible);
sparc_cpu_type = "Unknown SUN4V CPU";
sparc_fpu_type = "Unknown SUN4V FPU";
sparc_pmu_type = "Unknown SUN4V PMU";
break;
}
}
static int __init cpu_type_probe(void)
{
if (tlb_type == hypervisor) {
sun4v_cpu_probe();
} else {
unsigned long ver;
int manuf, impl;
__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
set_cpu_and_fpu(manuf, impl, impl);
}
return 0;
}
#endif /* CONFIG_SPARC64 */
early_initcall(cpu_type_probe);
| linux-master | arch/sparc/kernel/cpu.c |
/*
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller ([email protected])
* Copyright (C) 1996 Eddie C. Dost ([email protected])
*/
#include <linux/init.h>
#include <linux/export.h>
/* This is needed only for drivers/sbus/char/openprom.c */
EXPORT_SYMBOL(saved_command_line);
| linux-master | arch/sparc/kernel/sparc_ksyms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995 David S. Miller ([email protected])
* Copyright (C) 1995 Miguel de Icaza ([email protected])
*
* 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
*
* 2000/01/29
* <rth> zait: as long as pci_alloc_consistent produces something addressable,
* things are ok.
* <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
* pointer into the big page mapping
* <rth> zait: so what?
* <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
* <zaitcev> Hmm
* <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
* So far so good.
* <zaitcev> Now, driver calls pci_free_consistent(with result of
* remap_it_my_way()).
* <zaitcev> How do you find the address to pass to free_pages()?
* <rth> zait: walk the page tables? It's only two or three level after all.
* <rth> zait: you have to walk them anyway to remove the mapping.
* <zaitcev> Hmm
* <zaitcev> Sounds reasonable
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h> /* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#include <asm/leon.h>
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);
static void register_proc_sparc_ioport(void);
/* This resource describes the virtual memory range available for DVMA mappings */
static struct resource _sparc_dvma = {
.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};
/*
* Our mini-allocator...
* Boy this is gross! We need it because we must map I/O for
* timers and interrupt controller before the kmalloc is available.
*/
#define XNMLN 15
#define XNRES 10 /* SS-10 uses 8 */
struct xresource {
struct resource xres; /* Must be first */
int xflag; /* 1 == used */
char xname[XNMLN+1];
};
static struct xresource xresv[XNRES];
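/* Hand out the first unused slot of the static pool; once all XNRES
 * entries are taken, callers fall back to kmalloc() (see
 * _sparc_alloc_io() below).
 */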
static struct xresource *xres_alloc(void) {
struct xresource *xrp;
int n;
xrp = xresv;
for (n = 0; n < XNRES; n++) {
if (xrp->xflag == 0) {
xrp->xflag = 1;
return xrp;
}
xrp++;
}
return NULL;
}
static void xres_free(struct xresource *xrp) {
xrp->xflag = 0;
}
/*
* These are typically used in PCI drivers
* which are trying to be cross-platform.
*
* Bus type is always zero on IIep.
*/
void __iomem *ioremap(phys_addr_t offset, size_t size)
{
char name[14];
sprintf(name, "phys_%08x", (u32)offset);
return _sparc_alloc_io(0, (unsigned long)offset, size, name);
}
EXPORT_SYMBOL(ioremap);
/*
* Complementary to ioremap().
*/
void iounmap(volatile void __iomem *virtual)
{
unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
struct resource *res;
/*
* XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
* This probably warrants some sort of hashing.
*/
if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
printk("free_io/iounmap: cannot free %lx\n", vaddr);
return;
}
_sparc_free_io(res);
if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
xres_free((struct xresource *)res);
} else {
kfree(res);
}
}
EXPORT_SYMBOL(iounmap);
void __iomem *of_ioremap(struct resource *res, unsigned long offset,
unsigned long size, char *name)
{
return _sparc_alloc_io(res->flags & 0xF,
res->start + offset,
size, name);
}
EXPORT_SYMBOL(of_ioremap);
void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);
/*
* Meat of mapping
*/
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
unsigned long size, char *name)
{
static int printed_full;
struct xresource *xres;
struct resource *res;
char *tack;
int tlen;
void __iomem *va; /* P3 diag */
if (name == NULL) name = "???";
if ((xres = xres_alloc()) != NULL) {
tack = xres->xname;
res = &xres->xres;
} else {
if (!printed_full) {
printk("ioremap: done with statics, switching to malloc\n");
printed_full = 1;
}
tlen = strlen(name);
tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
if (tack == NULL) return NULL;
memset(tack, 0, sizeof(struct resource));
res = (struct resource *) tack;
tack += sizeof (struct resource);
}
strscpy(tack, name, XNMLN+1);
res->name = tack;
va = _sparc_ioremap(res, busno, phys, size);
/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
return va;
}
/*
 * Allocate a page-aligned chunk of the sparc_iomap region and map
 * [pa, pa + sz) into it, preserving the sub-page offset in the
 * returned cookie.
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
if (allocate_resource(&sparc_iomap, res,
(offset + sz + PAGE_SIZE-1) & PAGE_MASK,
sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
/* Usually we cannot see printks in this case. */
prom_printf("alloc_io_res(%s): cannot occupy\n",
(res->name != NULL)? res->name: "???");
prom_halt();
}
pa &= PAGE_MASK;
srmmu_mapiorange(bus, pa, res->start, resource_size(res));
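/* Illustrative: with 4 KiB pages, pa = 0x10000004 and sz = 8 give
 * offset = 4, a single page of iomap space, and a return value of
 * res->start + 4.
 */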
return (void __iomem *)(unsigned long)(res->start + offset);
}
/*
* Complementary to _sparc_ioremap().
*/
static void _sparc_free_io(struct resource *res)
{
unsigned long plen;
plen = resource_size(res);
BUG_ON((plen & (PAGE_SIZE-1)) != 0);
srmmu_unmapiorange(res->start, plen);
release_resource(res);
}
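/* Carve a page-aligned range of DVMA virtual space out of _sparc_dvma
 * for the given device. Returns 0 on failure, which is safe because
 * the DVMA window never starts at address zero.
 */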
unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
{
struct resource *res;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return 0;
res->name = dev->of_node->full_name;
if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
_sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("%s: cannot occupy 0x%zx", __func__, len);
kfree(res);
return 0;
}
return res->start;
}
bool sparc_dma_free_resource(void *cpu_addr, size_t size)
{
unsigned long addr = (unsigned long)cpu_addr;
struct resource *res;
res = lookup_resource(&_sparc_dvma, addr);
if (!res) {
printk("%s: cannot free %p\n", __func__, cpu_addr);
return false;
}
if ((addr & (PAGE_SIZE - 1)) != 0) {
printk("%s: unaligned va %p\n", __func__, cpu_addr);
return false;
}
size = PAGE_ALIGN(size);
if (resource_size(res) != size) {
printk("%s: region 0x%lx asked 0x%zx\n",
__func__, (long)resource_size(res), size);
return false;
}
release_resource(res);
kfree(res);
return true;
}
#ifdef CONFIG_SBUS
void sbus_set_sbus64(struct device *dev, int x)
{
printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
return 0;
}
arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
/*
* IIep is write-through, not flushing on cpu to device transfer.
*
* On LEON systems without cache snooping, the entire D-CACHE must be flushed to
* make DMA to cacheable memory coherent.
*/
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (dir != DMA_TO_DEVICE &&
sparc_cpu_model == sparc_leon &&
!sparc_leon3_snooping_enabled())
leon_flush_dcache_all();
}
#ifdef CONFIG_PROC_FS
static int sparc_io_proc_show(struct seq_file *m, void *v)
{
struct resource *root = m->private, *r;
const char *nm;
for (r = root->child; r != NULL; r = r->sibling) {
if ((nm = r->name) == NULL) nm = "???";
seq_printf(m, "%016llx-%016llx: %s\n",
(unsigned long long)r->start,
(unsigned long long)r->end, nm);
}
return 0;
}
#endif /* CONFIG_PROC_FS */
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
proc_create_single_data("io_map", 0, NULL, sparc_io_proc_show,
&sparc_iomap);
proc_create_single_data("dvma_map", 0, NULL, sparc_io_proc_show,
&_sparc_dvma);
#endif
}
| linux-master | arch/sparc/kernel/ioport.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Procedures for drawing on the screen early on in the boot process.
*
* Benjamin Herrenschmidt <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/console.h>
#include <asm/btext.h>
#include <asm/oplib.h>
#include <asm/io.h>
#define NO_SCROLL
#ifndef NO_SCROLL
static void scrollscreen(void);
#endif
static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
#define __force_data __section(".data")
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
static int g_max_loc_X __force_data;
static int g_max_loc_Y __force_data;
static int dispDeviceRowBytes __force_data;
static int dispDeviceDepth __force_data;
static int dispDeviceRect[4] __force_data;
static unsigned char *dispDeviceBase __force_data;
#define cmapsz (16*256)
static unsigned char vga_font[cmapsz];
static int __init btext_initialize(phandle node)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
u32 prop;
if (prom_getproperty(node, "width", (char *)&width, 4) < 0)
return -EINVAL;
if (prom_getproperty(node, "height", (char *)&height, 4) < 0)
return -EINVAL;
if (prom_getproperty(node, "depth", (char *)&depth, 4) < 0)
return -EINVAL;
pitch = width * ((depth + 7) / 8);
if (prom_getproperty(node, "linebytes", (char *)&prop, 4) >= 0 &&
prop != 0xffffffffu)
pitch = prop;
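/* Some firmware reports a bogus linebytes of 1; assume 4096 bytes then. */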
if (pitch == 1)
pitch = 0x1000;
if (prom_getproperty(node, "address", (char *)&prop, 4) >= 0)
address = prop;
/* FIXME: Add support for PCI reg properties. Right now, only
* reliable on macs
*/
if (address == 0)
return -EINVAL;
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
dispDeviceBase = (unsigned char *)address;
dispDeviceRowBytes = pitch;
dispDeviceDepth = depth == 15 ? 16 : depth;
dispDeviceRect[0] = dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
return 0;
}
/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
unsigned char *base = dispDeviceBase;
base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
return base;
}
static void btext_clearscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 2;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
{
unsigned int *ptr = base;
for(j=width; j; --j)
*(ptr++) = 0;
base += (dispDeviceRowBytes >> 2);
}
}
#ifndef NO_SCROLL
static void scrollscreen(void)
{
unsigned int *src = (unsigned int *)calc_base(0,16);
unsigned int *dst = (unsigned int *)calc_base(0,0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 2;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
{
unsigned int *src_ptr = src;
unsigned int *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = *(src_ptr++);
src += (dispDeviceRowBytes >> 2);
dst += (dispDeviceRowBytes >> 2);
}
for (i=0; i<16; i++)
{
unsigned int *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = 0;
dst += (dispDeviceRowBytes >> 2);
}
}
#endif /* ndef NO_SCROLL */
static void btext_drawchar(char c)
{
int cline = 0;
#ifdef NO_SCROLL
int x;
#endif
switch (c) {
case '\b':
if (g_loc_X > 0)
--g_loc_X;
break;
case '\t':
g_loc_X = (g_loc_X & -8) + 8;
break;
case '\r':
g_loc_X = 0;
break;
case '\n':
g_loc_X = 0;
g_loc_Y++;
cline = 1;
break;
default:
draw_byte(c, g_loc_X++, g_loc_Y);
}
if (g_loc_X >= g_max_loc_X) {
g_loc_X = 0;
g_loc_Y++;
cline = 1;
}
#ifndef NO_SCROLL
while (g_loc_Y >= g_max_loc_Y) {
scrollscreen();
g_loc_Y--;
}
#else
/* wrap around from bottom to top of screen so we don't
waste time scrolling each line. -- paulus. */
if (g_loc_Y >= g_max_loc_Y)
g_loc_Y = 0;
if (cline) {
for (x = 0; x < g_max_loc_X; ++x)
draw_byte(' ', x, g_loc_Y);
}
#endif
}
static void btext_drawtext(const char *c, unsigned int len)
{
while (len--)
btext_drawchar(*c++);
}
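/* Render one glyph: the console works on a grid of 8x16 character
 * cells, so cell (locX, locY) starts at pixel (locX * 8, locY * 16)
 * and each of the 16 font rows is expanded at the framebuffer depth.
 */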
static void draw_byte(unsigned char c, long locX, long locY)
{
unsigned char *base = calc_base(locX << 3, locY << 4);
unsigned char *font = &vga_font[((unsigned int)c) * 16];
int rb = dispDeviceRowBytes;
switch(dispDeviceDepth) {
case 24:
case 32:
draw_byte_32(font, (unsigned int *)base, rb);
break;
case 15:
case 16:
draw_byte_16(font, (unsigned int *)base, rb);
break;
case 8:
draw_byte_8(font, (unsigned int *)base, rb);
break;
}
}
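/* Bit-expansion lookup tables: expand_bits_8[n] turns a 4-bit font
 * nibble into four 8-bit pixels (0x00 or 0xff each) and
 * expand_bits_16[n] turns a 2-bit pair into two 16-bit pixels, so each
 * 32-bit store below paints several pixels at once.
 */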
static unsigned int expand_bits_8[16] = {
0x00000000,
0x000000ff,
0x0000ff00,
0x0000ffff,
0x00ff0000,
0x00ff00ff,
0x00ffff00,
0x00ffffff,
0xff000000,
0xff0000ff,
0xff00ff00,
0xff00ffff,
0xffff0000,
0xffff00ff,
0xffffff00,
0xffffffff
};
static unsigned int expand_bits_16[4] = {
0x00000000,
0x0000ffff,
0xffff0000,
0xffffffff
};
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (-(bits >> 7) & fg) ^ bg;
base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
base[7] = (-(bits & 1) & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_16;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 6] & fg) ^ bg;
base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
base[3] = (eb[bits & 3] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_8;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 4] & fg) ^ bg;
base[1] = (eb[bits & 0xf] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static void btext_console_write(struct console *con, const char *s,
unsigned int n)
{
btext_drawtext(s, n);
}
static struct console btext_console = {
.name = "btext",
.write = btext_console_write,
.flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME,
.index = 0,
};
int __init btext_find_display(void)
{
phandle node;
char type[32];
int ret;
node = prom_inst2pkg(prom_stdout);
if (prom_getproperty(node, "device_type", type, 32) < 0)
return -ENODEV;
if (strcmp(type, "display"))
return -ENODEV;
ret = btext_initialize(node);
if (!ret) {
btext_clearscreen();
register_console(&btext_console);
}
return ret;
}
static unsigned char vga_font[cmapsz] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};
| linux-master | arch/sparc/kernel/btext.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* On sparc, thread_info data is static and TI_XXX offsets are computed by hand.
*/
#include <linux/sched.h>
#include <linux/mm_types.h>
// #include <linux/mm.h>
#include <linux/kbuild.h>
#include <asm/hibernate.h>
#ifdef CONFIG_SPARC32
int sparc32_foo(void)
{
DEFINE(AOFF_thread_fork_kpsr,
offsetof(struct thread_struct, fork_kpsr));
return 0;
}
#else
int sparc64_foo(void)
{
#ifdef CONFIG_HIBERNATION
BLANK();
OFFSET(SC_REG_FP, saved_context, fp);
OFFSET(SC_REG_CWP, saved_context, cwp);
OFFSET(SC_REG_WSTATE, saved_context, wstate);
OFFSET(SC_REG_TICK, saved_context, tick);
OFFSET(SC_REG_PSTATE, saved_context, pstate);
OFFSET(SC_REG_G4, saved_context, g4);
OFFSET(SC_REG_G5, saved_context, g5);
OFFSET(SC_REG_G6, saved_context, g6);
#endif
return 0;
}
#endif
int foo(void)
{
BLANK();
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
BLANK();
DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
BLANK();
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
return 0;
}
| linux-master | arch/sparc/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0
/* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller ([email protected])
* Copyright (C) 1998, 1999 Eddie C. Dost ([email protected])
* Copyright (C) 1999 Jakub Jelinek ([email protected])
*
* OF tree based PCI bus probing taken from the PowerPC port
* with minor modifications, see there for credits.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>
#include "pci_impl.h"
#include "kernel.h"
/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;
/* Each PBM found gets a unique index. */
int pci_num_pbms = 0;
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;
static DEFINE_SPINLOCK(pci_poke_lock);
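/* Low-level config space accessors. These poke physical addresses via
 * ASI_PHYS_BYPASS_EC_E_L and may fault when the device does not exist;
 * the pci_poke_* flags let the trap handler record the fault in
 * pci_poke_faulted instead of treating it as fatal, in which case the
 * read routines simply leave *ret untouched.
 */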
void pci_config_read8(u8 *addr, u8 *ret)
{
unsigned long flags;
u8 byte;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduba [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (byte)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = byte;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_read16(u16 *addr, u16 *ret)
{
unsigned long flags;
u16 word;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduha [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (word)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = word;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_read32(u32 *addr, u32 *ret)
{
unsigned long flags;
u32 dword;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduwa [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (dword)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = dword;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_write8(u8 *addr, u8 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stba %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_write16(u16 *addr, u16 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stha %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_write32(u32 *addr, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stwa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
static int ofpci_verbose;
static int __init ofpci_debug(char *str)
{
int val = 0;
get_option(&str, &val);
if (val)
ofpci_verbose = 1;
return 1;
}
__setup("ofpci_debug=", ofpci_debug);
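/* Decode the first cell (phys.hi) of an OF PCI address. Per the PCI
 * OF binding, bits 0x03000000 encode the space code (01 = I/O,
 * 10 = 32-bit memory, 11 = 64-bit memory) and bit 0x40000000 marks a
 * prefetchable range, which is exactly what is tested below.
 */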
static unsigned long pci_parse_of_flags(u32 addr0)
{
unsigned long flags = 0;
if (addr0 & 0x02000000) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
if (addr0 & 0x01000000)
flags |= IORESOURCE_MEM_64
| PCI_BASE_ADDRESS_MEM_TYPE_64;
if (addr0 & 0x40000000)
flags |= IORESOURCE_PREFETCH
| PCI_BASE_ADDRESS_MEM_PREFETCH;
} else if (addr0 & 0x01000000)
flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
return flags;
}
/* The of_device layer has translated all of the assigned-address properties
* into physical address resources, we only have to figure out the register
* mapping.
*/
static void pci_parse_of_addrs(struct platform_device *op,
struct device_node *node,
struct pci_dev *dev)
{
struct resource *op_res;
const u32 *addrs;
int proplen;
addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs)
return;
if (ofpci_verbose)
pci_info(dev, " parse addresses (%d bytes) @ %p\n",
proplen, addrs);
op_res = &op->resource[0];
for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
struct resource *res;
unsigned long flags;
int i;
flags = pci_parse_of_flags(addrs[0]);
if (!flags)
continue;
i = addrs[0] & 0xff;
if (ofpci_verbose)
pci_info(dev, " start: %llx, end: %llx, i: %x\n",
op_res->start, op_res->end, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
res = &dev->resource[PCI_ROM_RESOURCE];
flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
} else {
pci_err(dev, "bad cfg reg num 0x%x\n", i);
continue;
}
res->start = op_res->start;
res->end = op_res->end;
res->flags = flags;
res->name = pci_name(dev);
pci_info(dev, "reg 0x%x: %pR\n", i, res);
}
}
static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
void *stc, void *host_controller,
struct platform_device *op,
int numa_node)
{
sd->iommu = iommu;
sd->stc = stc;
sd->host_controller = host_controller;
sd->op = op;
sd->numa_node = numa_node;
}
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus, int devfn)
{
struct dev_archdata *sd;
struct platform_device *op;
struct pci_dev *dev;
u32 class;
dev = pci_alloc_dev(bus);
if (!dev)
return NULL;
op = of_find_device_by_node(node);
sd = &dev->dev.archdata;
pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
pbm->numa_node);
sd = &op->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
sd->numa_node = pbm->numa_node;
if (of_node_name_eq(node, "ebus"))
of_propagate_archdata(op);
if (ofpci_verbose)
pci_info(bus," create device, devfn: %x, type: %s\n",
devfn, of_node_get_device_type(node));
dev->sysdata = node;
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
dev->dev.of_node = of_node_get(node);
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
set_pcie_port_type(dev);
pci_dev_assign_slot(dev);
dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
dev->device = of_getintprop_default(node, "device-id", 0xffff);
dev->subsystem_vendor =
of_getintprop_default(node, "subsystem-vendor-id", 0);
dev->subsystem_device =
of_getintprop_default(node, "subsystem-id", 0);
dev->cfg_size = pci_cfg_space_size(dev);
/* We can't actually use the firmware value, we have
* to read what is in the register right now. One
* reason is that in the case of IDE interfaces the
* firmware can sample the value before the IDE
* interface is programmed into native mode.
*/
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
dev->class = class >> 8;
dev->revision = class & 0xff;
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
/* I have seen IDE devices which will not respond to
* the bmdma simplex check reads if bus mastering is
* disabled.
*/
if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
pci_set_master(dev);
dev->current_state = PCI_UNKNOWN; /* unknown power state */
dev->error_state = pci_channel_io_normal;
dev->dma_mask = 0xffffffff;
if (of_node_name_eq(node, "pci")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
} else if (of_node_is_type(node, "cardbus")) {
dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
dev->rom_base_reg = PCI_ROM_ADDRESS;
dev->irq = sd->op->archdata.irqs[0];
if (dev->irq == 0xffffffff)
dev->irq = PCI_IRQ_NONE;
}
pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
pci_parse_of_addrs(sd->op, node, dev);
if (ofpci_verbose)
pci_info(dev, " adding to system ...\n");
pci_device_add(dev, bus);
return dev;
}
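/* Find the lowest and highest set bits in an APB address map byte.
 * Illustrative: map = 0x38 (bits 3..5 set) yields first = 3 and
 * last = 5; apb_fake_ranges() scales these into bus address ranges.
 */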
static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
{
u32 idx, first, last;
first = 8;
last = 0;
for (idx = 0; idx < 8; idx++) {
if ((map & (1 << idx)) != 0) {
if (first > idx)
first = idx;
if (last < idx)
last = idx;
}
}
*first_p = first;
*last_p = last;
}
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
* a proper 'ranges' property.
*/
static void apb_fake_ranges(struct pci_dev *dev,
struct pci_bus *bus,
struct pci_pbm_info *pbm)
{
struct pci_bus_region region;
struct resource *res;
u32 first, last;
u8 map;
pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
apb_calc_first_last(map, &first, &last);
res = bus->resource[0];
res->flags = IORESOURCE_IO;
region.start = (first << 21);
region.end = (last << 21) + ((1 << 21) - 1);
pcibios_bus_to_resource(dev->bus, res, &region);
pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
apb_calc_first_last(map, &first, &last);
res = bus->resource[1];
res->flags = IORESOURCE_MEM;
region.start = (first << 29);
region.end = (last << 29) + ((1 << 29) - 1);
pcibios_bus_to_resource(dev->bus, res, &region);
}
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus);
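/* Combine two consecutive 32-bit OF cells (most significant first)
 * into a 64-bit value.
 */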
#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_dev *dev)
{
struct pci_bus *bus;
const u32 *busrange, *ranges;
int len, i, simba;
struct pci_bus_region region;
struct resource *res;
unsigned int flags;
u64 size;
if (ofpci_verbose)
pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n",
node);
return;
}
if (ofpci_verbose)
pci_info(dev, " Bridge bus range [%u --> %u]\n",
busrange[0], busrange[1]);
ranges = of_get_property(node, "ranges", &len);
simba = 0;
if (ranges == NULL) {
const char *model = of_get_property(node, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
simba = 1;
}
bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
if (!bus) {
pci_err(dev, "Failed to create pci bus for %pOF\n",
node);
return;
}
bus->primary = dev->bus->number;
pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
bus->bridge_ctl = 0;
if (ofpci_verbose)
pci_info(dev, " Bridge ranges[%p] simba[%d]\n",
ranges, simba);
/* parse ranges property, or cook one up by hand for Simba */
/* PCI #address-cells == 3 and #size-cells == 2 always */
res = &dev->resource[PCI_BRIDGE_RESOURCES];
for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
res->flags = 0;
bus->resource[i] = res;
++res;
}
if (simba) {
apb_fake_ranges(dev, bus, pbm);
goto after_ranges;
} else if (ranges == NULL) {
pci_read_bridge_bases(bus);
goto after_ranges;
}
i = 1;
for (; len >= 32; len -= 32, ranges += 8) {
u64 start;
if (ofpci_verbose)
pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
"%08x:%08x]\n",
ranges[0], ranges[1], ranges[2], ranges[3],
ranges[4], ranges[5], ranges[6], ranges[7]);
flags = pci_parse_of_flags(ranges[0]);
size = GET_64BIT(ranges, 6);
if (flags == 0 || size == 0)
continue;
/* On PCI-Express systems, PCI bridges that have no devices downstream
* have a bogus size value where the first 32-bit cell is 0xffffffff.
* This results in a bogus range where start + size overflows.
*
* Just skip these otherwise the kernel will complain when the resource
* tries to be claimed.
*/
if (size >> 32 == 0xffffffff)
continue;
if (flags & IORESOURCE_IO) {
res = bus->resource[0];
if (res->flags) {
pci_err(dev, "ignoring extra I/O range"
" for bridge %pOF\n", node);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
pci_err(dev, "too many memory ranges"
" for bridge %pOF\n", node);
continue;
}
res = bus->resource[i];
++i;
}
res->flags = flags;
region.start = start = GET_64BIT(ranges, 1);
region.end = region.start + size - 1;
if (ofpci_verbose)
pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n",
flags, start, size);
pcibios_bus_to_resource(dev->bus, res, &region);
}
after_ranges:
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
if (ofpci_verbose)
pci_info(dev, " bus name: %s\n", bus->name);
pci_of_scan_bus(pbm, node, bus);
}
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus)
{
struct device_node *child;
const u32 *reg;
int reglen, devfn, prev_devfn;
struct pci_dev *dev;
if (ofpci_verbose)
pci_info(bus, "scan_bus[%pOF] bus no %d\n",
node, bus->number);
prev_devfn = -1;
for_each_child_of_node(node, child) {
if (ofpci_verbose)
pci_info(bus, " * %pOF\n", child);
reg = of_get_property(child, "reg", ®len);
if (reg == NULL || reglen < 20)
continue;
devfn = (reg[0] >> 8) & 0xff;
/* This is a workaround for some device trees
* which list PCI devices twice. On the V100
* for example, device number 3 is listed twice.
* Once as "pm" and once again as "lomp".
*/
if (devfn == prev_devfn)
continue;
prev_devfn = devfn;
/* create a new pci_dev for this device */
dev = of_create_pci_dev(pbm, child, bus, devfn);
if (!dev)
continue;
if (ofpci_verbose)
pci_info(dev, "dev header type: %x\n", dev->hdr_type);
if (pci_is_bridge(dev))
of_scan_pci_bridge(pbm, child, dev);
}
}
static ssize_t
show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
{
struct pci_dev *pdev;
struct device_node *dp;
pdev = to_pci_dev(dev);
dp = pdev->dev.of_node;
return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
static void pci_bus_register_of_sysfs(struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child_bus;
int err;
list_for_each_entry(dev, &bus->devices, bus_list) {
/* we don't really care if we can create this file or
* not, but we need to assign the result of the call
* or the world will fall under alien invasion and
* everybody will be frozen on a spaceship ready to be
* eaten on alpha centauri by some green and jelly
* humanoid.
*/
err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
(void) err;
}
list_for_each_entry(child_bus, &bus->children, node)
pci_bus_register_of_sysfs(child_bus);
}
static void pci_claim_legacy_resources(struct pci_dev *dev)
{
struct pci_bus_region region;
struct resource *p, *root, *conflict;
if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return;
p->name = "Video RAM area";
p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
region.start = 0xa0000UL;
region.end = region.start + 0x1ffffUL;
pcibios_bus_to_resource(dev->bus, p, &region);
root = pci_find_parent_resource(dev, p);
if (!root) {
pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
goto err;
}
conflict = request_resource_conflict(root, p);
if (conflict) {
pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
p, conflict->name, conflict);
goto err;
}
pci_info(dev, "VGA legacy framebuffer %pR\n", p);
return;
err:
kfree(p);
}
static void pci_claim_bus_resources(struct pci_bus *bus)
{
struct pci_bus *child_bus;
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct resource *r;
int i;
pci_dev_for_each_resource(dev, r, i) {
if (r->parent || !r->start || !r->flags)
continue;
if (ofpci_verbose)
pci_info(dev, "Claiming Resource %d: %pR\n",
i, r);
pci_claim_resource(dev, i);
}
pci_claim_legacy_resources(dev);
}
list_for_each_entry(child_bus, &bus->children, node)
pci_claim_bus_resources(child_bus);
}
struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
struct device *parent)
{
LIST_HEAD(resources);
struct device_node *node = pbm->op->dev.of_node;
struct pci_bus *bus;
printk("PCI: Scanning PBM %pOF\n", node);
pci_add_resource_offset(&resources, &pbm->io_space,
pbm->io_offset);
pci_add_resource_offset(&resources, &pbm->mem_space,
pbm->mem_offset);
if (pbm->mem64_space.flags)
pci_add_resource_offset(&resources, &pbm->mem64_space,
pbm->mem64_offset);
pbm->busn.start = pbm->pci_first_busno;
pbm->busn.end = pbm->pci_last_busno;
pbm->busn.flags = IORESOURCE_BUS;
pci_add_resource(&resources, &pbm->busn);
bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
pbm, &resources);
if (!bus) {
printk(KERN_ERR "Failed to create bus for %pOF\n", node);
pci_free_resource_list(&resources);
return NULL;
}
pci_of_scan_bus(pbm, node, bus);
pci_bus_register_of_sysfs(bus);
pci_claim_bus_resources(bus);
pci_bus_add_devices(bus);
return bus;
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
struct resource *res;
u16 cmd, oldcmd;
int i;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
oldcmd = cmd;
pci_dev_for_each_resource(dev, res, i) {
/* Only set up the requested stuff */
if (!(mask & (1<<i)))
continue;
if (res->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (res->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != oldcmd) {
pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
resource_size_t ioaddr = pci_resource_start(pdev, bar);
if (!pbm)
return -EINVAL;
vma->vm_pgoff += (ioaddr + pbm->io_space.start) >> PAGE_SHIFT;
return 0;
}
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *pbus)
{
struct pci_pbm_info *pbm = pbus->sysdata;
return pbm->numa_node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
/* Return the domain number for this pci bus */
int pci_domain_nr(struct pci_bus *pbus)
{
struct pci_pbm_info *pbm = pbus->sysdata;
int ret;
if (!pbm) {
ret = -ENXIO;
} else {
ret = pbm->index;
}
return ret;
}
EXPORT_SYMBOL(pci_domain_nr);
#ifdef CONFIG_PCI_MSI
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned int irq;
if (!pbm->setup_msi_irq)
return -EINVAL;
return pbm->setup_msi_irq(&irq, pdev, desc);
}
void arch_teardown_msi_irq(unsigned int irq)
{
struct msi_desc *entry = irq_get_msi_desc(irq);
struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
pbm->teardown_msi_irq(irq, pdev);
}
#endif /* CONFIG_PCI_MSI */
/* ALI sound chips generate 31-bits of DMA, a special register
* determines what bit 31 is emitted as.
*/
int ali_sound_dma_hack(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
struct pci_dev *ali_isa_bridge;
u8 val;
if (!dev_is_pci(dev))
return 0;
if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL ||
to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 ||
device_mask != 0x7fffffff)
return 0;
ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
PCI_DEVICE_ID_AL_M1533,
NULL);
pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
if (iommu->dma_addr_mask & 0x80000000)
val |= 0x01;
else
val &= ~0x01;
pci_write_config_byte(ali_isa_bridge, 0x7e, val);
pci_dev_put(ali_isa_bridge);
return 1;
}
void pci_resource_to_user(const struct pci_dev *pdev, int bar,
const struct resource *rp, resource_size_t *start,
resource_size_t *end)
{
struct pci_bus_region region;
/*
* "User" addresses are shown in /sys/devices/pci.../.../resource
* and /proc/bus/pci/devices and used as mmap offsets for
* /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
*
* On sparc, these are PCI bus addresses, i.e., raw BAR values.
*/
pcibios_resource_to_bus(pdev->bus, ®ion, (struct resource *) rp);
*start = region.start;
*end = region.end;
}
void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
#ifdef CONFIG_PCI_IOV
int pcibios_device_add(struct pci_dev *dev)
{
struct pci_dev *pdev;
/* Add sriov arch specific initialization here.
* Copy dev_archdata from PF to VF
*/
if (dev->is_virtfn) {
struct dev_archdata *psd;
pdev = dev->physfn;
psd = &pdev->dev.archdata;
pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
psd->stc, psd->host_controller, NULL,
psd->numa_node);
}
return 0;
}
#endif /* CONFIG_PCI_IOV */
static int __init pcibios_init(void)
{
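/* Note: pci_dfl_cache_line_size is measured in 32-bit words, so
 * 64 bytes >> 2 programs a 64-byte PCI cache line (16 dwords).
 */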
pci_dfl_cache_line_size = 64 >> 2;
return 0;
}
subsys_initcall(pcibios_init);
#ifdef CONFIG_SYSFS
#define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */
static void pcie_bus_slot_names(struct pci_bus *pbus)
{
struct pci_dev *pdev;
struct pci_bus *bus;
list_for_each_entry(pdev, &pbus->devices, bus_list) {
char name[SLOT_NAME_SIZE];
struct pci_slot *pci_slot;
const u32 *slot_num;
int len;
slot_num = of_get_property(pdev->dev.of_node,
"physical-slot#", &len);
if (slot_num == NULL || len != 4)
continue;
snprintf(name, sizeof(name), "%u", slot_num[0]);
pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
if (IS_ERR(pci_slot))
pr_err("PCI: pci_create_slot returned %ld.\n",
PTR_ERR(pci_slot));
}
list_for_each_entry(bus, &pbus->children, node)
pcie_bus_slot_names(bus);
}
static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
{
const struct pci_slot_names {
u32 slot_mask;
char names[];
} *prop;
const char *sp;
int len, i;
u32 mask;
prop = of_get_property(node, "slot-names", &len);
if (!prop)
return;
mask = prop->slot_mask;
sp = prop->names;
if (ofpci_verbose)
pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n",
node, mask);
i = 0;
while (mask) {
struct pci_slot *pci_slot;
u32 this_bit = 1 << i;
if (!(mask & this_bit)) {
i++;
continue;
}
if (ofpci_verbose)
pci_info(bus, "Making slot [%s]\n", sp);
pci_slot = pci_create_slot(bus, i, sp, NULL);
if (IS_ERR(pci_slot))
pci_err(bus, "pci_create_slot returned %ld\n",
PTR_ERR(pci_slot));
sp += strlen(sp) + 1;
mask &= ~this_bit;
i++;
}
}
static int __init of_pci_slot_init(void)
{
struct pci_bus *pbus = NULL;
while ((pbus = pci_find_next_bus(pbus)) != NULL) {
struct device_node *node;
struct pci_dev *pdev;
pdev = list_first_entry(&pbus->devices, struct pci_dev,
bus_list);
if (pdev && pci_is_pcie(pdev)) {
pcie_bus_slot_names(pbus);
} else {
if (pbus->self) {
/* PCI->PCI bridge */
node = pbus->self->dev.of_node;
} else {
struct pci_pbm_info *pbm = pbus->sysdata;
/* Host PCI controller */
node = pbm->op->dev.of_node;
}
pci_bus_slot_names(node, pbus);
}
}
return 0;
}
device_initcall(of_pci_slot_init);
#endif
| linux-master | arch/sparc/kernel/pci.c |
// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc SMP support.
*
* Copyright (C) 1996 David S. Miller ([email protected])
* Copyright (C) 1998 Jakub Jelinek ([email protected])
* Copyright (C) 2004 Keith M Wesolowski ([email protected])
*/
#include <asm/head.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>
#include "kernel.h"
#include "irq.h"
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
cpumask_t smp_commenced_mask = CPU_MASK_NONE;
const struct sparc32_ipi_ops *sparc32_ipi_ops;
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
* places the current byte at the effective address into dest_reg and
* places 0xff there afterwards. A pretty lame locking primitive
* compared to the Alpha and the Intel, no? Most Sparcs have a 'swap'
* instruction, which is much better...
*/
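/* Purely illustrative sketch (not used by this file): a trylock built
 * on ldstub, in the spirit of sparc32's arch_spin_trylock(). The
 * function name is hypothetical.
 */
static inline int example_ldstub_trylock(unsigned char *lock)
{
unsigned char val;
/* Atomically fetch the old byte and store 0xff in its place. */
__asm__ __volatile__("ldstub [%1], %0"
: "=&r" (val)
: "r" (lock)
: "memory");
return val == 0; /* we hold the lock iff it was previously 0 */
}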
void smp_store_cpu_info(int id)
{
int cpu_node;
int mid;
cpu_data(id).udelay_val = loops_per_jiffy;
cpu_find_by_mid(id, &cpu_node);
cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
mid = cpu_get_hwmid(cpu_node);
if (mid < 0) {
printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
mid = 0;
}
cpu_data(id).mid = mid;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
unsigned long bogosum = 0;
int cpu, num = 0;
for_each_online_cpu(cpu) {
num++;
bogosum += cpu_data(cpu).udelay_val;
}
printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
num, bogosum/(500000/HZ),
(bogosum/(5000/HZ))%100);
switch(sparc_cpu_model) {
case sun4m:
smp4m_smp_done();
break;
case sun4d:
smp4d_smp_done();
break;
case sparc_leon:
leon_smp_done();
break;
case sun4e:
printk("SUN4E\n");
BUG();
break;
case sun4u:
printk("SUN4U\n");
BUG();
break;
default:
printk("UNKNOWN!\n");
BUG();
break;
}
}
void cpu_panic(void)
{
printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
panic("SMP bolixed\n");
}
struct linux_prom_registers smp_penguin_ctable = { 0 };
void arch_smp_send_reschedule(int cpu)
{
/*
* CPU model dependent way of implementing IPI generation targeting
* a single CPU. The trap handler needs only to do trap entry/return
* to call schedule.
*/
sparc32_ipi_ops->resched(cpu);
}
void smp_send_stop(void)
{
}
void arch_send_call_function_single_ipi(int cpu)
{
/* trigger one IPI single call on one CPU */
sparc32_ipi_ops->single(cpu);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
/* trigger IPI mask call on each CPU */
for_each_cpu(cpu, mask)
sparc32_ipi_ops->mask_one(cpu);
}
void smp_resched_interrupt(void)
{
irq_enter();
scheduler_ipi();
local_cpu_data().irq_resched_count++;
irq_exit();
/* re-schedule routine called by interrupt return code. */
}
void smp_call_function_single_interrupt(void)
{
irq_enter();
generic_smp_call_function_single_interrupt();
local_cpu_data().irq_call_count++;
irq_exit();
}
void smp_call_function_interrupt(void)
{
irq_enter();
generic_smp_call_function_interrupt();
local_cpu_data().irq_call_count++;
irq_exit();
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i, cpuid, extra;
printk("Entering SMP Mode...\n");
extra = 0;
for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
if (cpuid >= NR_CPUS)
extra++;
}
/* i = number of cpus */
if (extra && max_cpus > i - extra)
printk("Warning: NR_CPUS is too low to start all cpus\n");
smp_store_cpu_info(boot_cpu_id);
switch(sparc_cpu_model) {
case sun4m:
smp4m_boot_cpus();
break;
case sun4d:
smp4d_boot_cpus();
break;
case sparc_leon:
leon_boot_cpus();
break;
case sun4e:
printk("SUN4E\n");
BUG();
break;
case sun4u:
printk("SUN4U\n");
BUG();
break;
default:
printk("UNKNOWN!\n");
BUG();
break;
}
}
/* Set this up early so that things like the scheduler can init
* properly. We use the same cpu mask for both the present and
* possible cpu map.
*/
void __init smp_setup_cpu_possible_map(void)
{
int instance, mid;
instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid < NR_CPUS) {
set_cpu_possible(mid, true);
set_cpu_present(mid, true);
}
instance++;
}
}
void __init smp_prepare_boot_cpu(void)
{
int cpuid = hard_smp_processor_id();
if (cpuid >= NR_CPUS) {
prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
prom_halt();
}
if (cpuid != 0)
printk("boot cpu id != 0, this could work but is untested\n");
current_thread_info()->cpu = cpuid;
set_cpu_online(cpuid, true);
set_cpu_possible(cpuid, true);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = 0;
switch(sparc_cpu_model) {
case sun4m:
ret = smp4m_boot_one_cpu(cpu, tidle);
break;
case sun4d:
ret = smp4d_boot_one_cpu(cpu, tidle);
break;
case sparc_leon:
ret = leon_boot_one_cpu(cpu, tidle);
break;
case sun4e:
printk("SUN4E\n");
BUG();
break;
case sun4u:
printk("SUN4U\n");
BUG();
break;
default:
printk("UNKNOWN!\n");
BUG();
break;
}
if (!ret) {
cpumask_set_cpu(cpu, &smp_commenced_mask);
while (!cpu_online(cpu))
mb();
}
return ret;
}
static void arch_cpu_pre_starting(void *arg)
{
local_ops->cache_all();
local_ops->tlb_all();
switch(sparc_cpu_model) {
case sun4m:
sun4m_cpu_pre_starting(arg);
break;
case sun4d:
sun4d_cpu_pre_starting(arg);
break;
case sparc_leon:
leon_cpu_pre_starting(arg);
break;
default:
BUG();
}
}
static void arch_cpu_pre_online(void *arg)
{
unsigned int cpuid = hard_smp_processor_id();
register_percpu_ce(cpuid);
calibrate_delay();
smp_store_cpu_info(cpuid);
local_ops->cache_all();
local_ops->tlb_all();
switch(sparc_cpu_model) {
case sun4m:
sun4m_cpu_pre_online(arg);
break;
case sun4d:
sun4d_cpu_pre_online(arg);
break;
case sparc_leon:
leon_cpu_pre_online(arg);
break;
default:
BUG();
}
}
static void sparc_start_secondary(void *arg)
{
unsigned int cpu;
/*
* SMP booting is extremely fragile in some architectures. So run
* the cpu initialization code first before anything else.
*/
arch_cpu_pre_starting(arg);
cpu = smp_processor_id();
notify_cpu_starting(cpu);
arch_cpu_pre_online(arg);
/* Set the CPU in the cpu_online_mask */
set_cpu_online(cpu, true);
/* Enable local interrupts now */
local_irq_enable();
wmb();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
/* We should never reach here! */
BUG();
}
void smp_callin(void)
{
sparc_start_secondary(NULL);
}
void smp_bogo(struct seq_file *m)
{
int i;
for_each_online_cpu(i) {
seq_printf(m,
"Cpu%dBogo\t: %lu.%02lu\n",
i,
cpu_data(i).udelay_val/(500000/HZ),
(cpu_data(i).udelay_val/(5000/HZ))%100);
}
}
void smp_info(struct seq_file *m)
{
int i;
seq_printf(m, "State:\n");
for_each_online_cpu(i)
seq_printf(m, "CPU%d\t\t: online\n", i);
}
| linux-master | arch/sparc/kernel/smp_32.c |
// SPDX-License-Identifier: GPL-2.0
/* ptrace.c: Sparc process tracing support.
*
* Copyright (C) 1996, 2008 David S. Miller ([email protected])
*
* Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
* and David Mosberger.
*
* Added Linux support -miguel (weird, eh?, the original code was meant
* to emulate SunOS).
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include "kernel.h"
/* #define ALLOW_INIT_TRACING */
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* nothing to do */
}
enum sparc_regset {
REGSET_GENERAL,
REGSET_FP,
};
static int regwindow32_get(struct task_struct *target,
const struct pt_regs *regs,
u32 *uregs)
{
unsigned long reg_window = regs->u_regs[UREG_I6];
int size = 16 * sizeof(u32);
if (target == current) {
if (copy_from_user(uregs, (void __user *)reg_window, size))
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
FOLL_FORCE) != size)
return -EFAULT;
}
return 0;
}
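/* For reference: the 16 words read above are the sparc32 ABI register
 * window save area at %fp (UREG_I6): %l0-%l7 at [%fp + 0x00..0x1c]
 * followed by %i0-%i7 at [%fp + 0x20..0x3c].
 */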
static int regwindow32_set(struct task_struct *target,
const struct pt_regs *regs,
u32 *uregs)
{
unsigned long reg_window = regs->u_regs[UREG_I6];
int size = 16 * sizeof(u32);
if (target == current) {
if (copy_to_user((void __user *)reg_window, uregs, size))
return -EFAULT;
} else {
if (access_process_vm(target, reg_window, uregs, size,
FOLL_FORCE | FOLL_WRITE) != size)
return -EFAULT;
}
return 0;
}
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = target->thread.kregs;
u32 uregs[16];
if (target == current)
flush_user_windows();
membuf_write(&to, regs->u_regs, 16 * sizeof(u32));
if (!to.left)
return 0;
if (regwindow32_get(target, regs, uregs))
return -EFAULT;
membuf_write(&to, uregs, 16 * sizeof(u32));
membuf_store(&to, regs->psr);
membuf_store(&to, regs->pc);
membuf_store(&to, regs->npc);
membuf_store(&to, regs->y);
return membuf_zero(&to, 2 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
u32 uregs[16];
u32 psr;
int ret;
if (target == current)
flush_user_windows();
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->u_regs,
0, 16 * sizeof(u32));
if (ret || !count)
return ret;
if (regwindow32_get(target, regs, uregs))
return -EFAULT;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
uregs,
16 * sizeof(u32), 32 * sizeof(u32));
if (ret)
return ret;
if (regwindow32_set(target, regs, uregs))
return -EFAULT;
if (!count)
return 0;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&psr,
32 * sizeof(u32), 33 * sizeof(u32));
if (ret)
return ret;
regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
(psr & (PSR_ICC | PSR_SYSCALL));
if (!count)
return 0;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
®s->pc,
33 * sizeof(u32), 34 * sizeof(u32));
if (ret || !count)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
®s->npc,
34 * sizeof(u32), 35 * sizeof(u32));
if (ret || !count)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
®s->y,
35 * sizeof(u32), 36 * sizeof(u32));
if (ret || !count)
return ret;
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 36 * sizeof(u32),
38 * sizeof(u32));
return 0;
}
static int fpregs32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
#if 0
if (target == current)
save_and_clear_fpu();
#endif
membuf_write(&to, target->thread.float_regs, 32 * sizeof(u32));
membuf_zero(&to, sizeof(u32));
membuf_write(&to, &target->thread.fsr, sizeof(u32));
membuf_store(&to, (u32)((1 << 8) | (8 << 16)));
return membuf_zero(&to, 64 * sizeof(u32));
}
static int fpregs32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *fpregs = target->thread.float_regs;
int ret;
#if 0
if (target == current)
save_and_clear_fpu();
#endif
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
fpregs,
0, 32 * sizeof(u32));
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
32 * sizeof(u32),
33 * sizeof(u32));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fsr,
33 * sizeof(u32),
34 * sizeof(u32));
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
34 * sizeof(u32), -1);
return ret;
}
static const struct user_regset sparc32_regsets[] = {
/* Format is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
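/* In word offsets, as used by genregs32_set() above: the registers
 * occupy words 0-31 (the G/O set from pt_regs, the L/I set from the
 * window at %fp), then PSR at word 32, PC at 33, nPC at 34, Y at 35,
 * and two ignored words (WIM, TBR) at 36-37.
 */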
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = 38,
.size = sizeof(u32), .align = sizeof(u32),
.regset_get = genregs32_get, .set = genregs32_set
},
/* Format is:
* F0 --> F31
* empty 32-bit word
* FSR (32-bit word)
* FPU QUEUE COUNT (8-bit char)
* FPU QUEUE ENTRYSIZE (8-bit char)
* FPU ENABLED (8-bit char)
* empty 8-bit char
* FPU QUEUE (64 32-bit ints)
*/
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = 99,
.size = sizeof(u32), .align = sizeof(u32),
.regset_get = fpregs32_get, .set = fpregs32_set
},
};
static int getregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = target->thread.kregs;
if (target == current)
flush_user_windows();
membuf_store(&to, regs->psr);
membuf_store(&to, regs->pc);
membuf_store(&to, regs->npc);
membuf_store(&to, regs->y);
return membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u32));
}
static int setregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
u32 v[4];
int ret;
if (target == current)
flush_user_windows();
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
v,
0, 4 * sizeof(u32));
if (ret)
return ret;
regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
(v[0] & (PSR_ICC | PSR_SYSCALL));
regs->pc = v[1];
regs->npc = v[2];
regs->y = v[3];
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->u_regs + 1,
4 * sizeof(u32), 19 * sizeof(u32));
}
static int getfpregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
#if 0
if (target == current)
save_and_clear_fpu();
#endif
membuf_write(&to, &target->thread.float_regs, 32 * sizeof(u32));
membuf_write(&to, &target->thread.fsr, sizeof(u32));
return membuf_zero(&to, 35 * sizeof(u32));
}
static int setfpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *fpregs = target->thread.float_regs;
int ret;
#if 0
if (target == current)
save_and_clear_fpu();
#endif
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
fpregs,
0, 32 * sizeof(u32));
if (ret)
return ret;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fsr,
32 * sizeof(u32),
33 * sizeof(u32));
}
static const struct user_regset ptrace32_regsets[] = {
[REGSET_GENERAL] = {
.n = 19, .size = sizeof(u32),
.regset_get = getregs_get, .set = setregs_set,
},
[REGSET_FP] = {
.n = 68, .size = sizeof(u32),
.regset_get = getfpregs_get, .set = setfpregs_set,
},
};
static const struct user_regset_view ptrace32_view = {
.regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets)
};
static const struct user_regset_view user_sparc32_view = {
.name = "sparc", .e_machine = EM_SPARC,
.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sparc32_view;
}
struct fps {
unsigned long regs[32];
unsigned long fsr;
unsigned long flags;
unsigned long extra;
unsigned long fpqd;
struct fq {
unsigned long *insnaddr;
unsigned long insn;
} fpq[16];
};
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
void __user *addr2p;
struct pt_regs __user *pregs;
struct fps __user *fps;
int ret;
addr2p = (void __user *) addr2;
pregs = (struct pt_regs __user *) addr;
fps = (struct fps __user *) addr;
switch(request) {
case PTRACE_GETREGS: {
ret = copy_regset_to_user(child, &ptrace32_view,
REGSET_GENERAL, 0,
19 * sizeof(u32),
pregs);
break;
}
case PTRACE_SETREGS: {
ret = copy_regset_from_user(child, &ptrace32_view,
REGSET_GENERAL, 0,
19 * sizeof(u32),
pregs);
break;
}
case PTRACE_GETFPREGS: {
ret = copy_regset_to_user(child, &ptrace32_view,
REGSET_FP, 0,
68 * sizeof(u32),
fps);
break;
}
case PTRACE_SETFPREGS: {
ret = copy_regset_from_user(child, &ptrace32_view,
REGSET_FP, 0,
33 * sizeof(u32),
fps);
break;
}
case PTRACE_READTEXT:
case PTRACE_READDATA:
ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
ret = -EIO;
break;
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
ret = -EIO;
break;
default:
if (request == PTRACE_SPARC_DETACH)
request = PTRACE_DETACH;
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p)
{
int ret = 0;
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (syscall_exit_p)
ptrace_report_syscall_exit(regs, 0);
else
ret = ptrace_report_syscall_entry(regs);
}
return ret;
}
| linux-master | arch/sparc/kernel/ptrace_32.c |
// SPDX-License-Identifier: GPL-2.0
/* Kernel module help for sparc64.
*
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2002 David S. Miller.
*/
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/spitfire.h>
#include <asm/cacheflush.h>
#include "entry.h"
#ifdef CONFIG_SPARC64
#include <linux/jump_label.h>
static void *module_map(unsigned long size)
{
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#else
static void *module_map(unsigned long size)
{
return vmalloc(size);
}
#endif /* CONFIG_SPARC64 */
void *module_alloc(unsigned long size)
{
void *ret;
ret = module_map(size);
if (ret)
memset(ret, 0, size);
return ret;
}
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
unsigned int symidx;
Elf_Sym *sym;
char *strtab;
int i;
for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
if (symidx == hdr->e_shnum-1) {
printk("%s: no symtab found.\n", mod->name);
return -ENOEXEC;
}
}
sym = (Elf_Sym *)sechdrs[symidx].sh_addr;
strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
if (sym[i].st_shndx == SHN_UNDEF) {
if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER)
sym[i].st_shndx = SHN_ABS;
}
}
return 0;
}
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf_Sym *sym;
u8 *location;
u32 *loc32;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
Elf_Addr v;
/* This is where to make the change */
location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
loc32 = (u32 *) location;
#ifdef CONFIG_SPARC64
BUG_ON(((u64)location >> (u64)32) != (u64)0);
#endif /* CONFIG_SPARC64 */
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_R_SYM(rel[i].r_info);
v = sym->st_value + rel[i].r_addend;
switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
case R_SPARC_DISP32:
v -= (Elf_Addr) location;
*loc32 = v;
break;
#ifdef CONFIG_SPARC64
case R_SPARC_64:
location[0] = v >> 56;
location[1] = v >> 48;
location[2] = v >> 40;
location[3] = v >> 32;
location[4] = v >> 24;
location[5] = v >> 16;
location[6] = v >> 8;
location[7] = v >> 0;
break;
case R_SPARC_WDISP19:
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x7ffff) |
((v >> 2) & 0x7ffff);
break;
case R_SPARC_OLO10:
*loc32 = (*loc32 & ~0x1fff) |
(((v & 0x3ff) +
(ELF_R_TYPE(rel[i].r_info) >> 8))
& 0x1fff);
break;
#endif /* CONFIG_SPARC64 */
case R_SPARC_32:
case R_SPARC_UA32:
location[0] = v >> 24;
location[1] = v >> 16;
location[2] = v >> 8;
location[3] = v >> 0;
break;
case R_SPARC_WDISP30:
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x3fffffff) |
((v >> 2) & 0x3fffffff);
break;
case R_SPARC_WDISP22:
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x3fffff) |
((v >> 2) & 0x3fffff);
break;
case R_SPARC_LO10:
*loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
break;
case R_SPARC_HI22:
*loc32 = (*loc32 & ~0x3fffff) |
((v >> 10) & 0x3fffff);
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %x\n",
me->name,
(int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
return -ENOEXEC;
}
}
return 0;
}
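/* Worked example (informational): an R_SPARC_HI22/R_SPARC_LO10 pair is
 * how a sethi/or sequence builds a 32-bit constant. For v = 0x12345678,
 * HI22 patches (v >> 10) = 0x48d15 into the sethi immediate and LO10
 * patches (v & 0x3ff) = 0x278 into the or immediate, so
 * "sethi %hi(v), %reg; or %reg, %lo(v), %reg" reconstructs v exactly.
 */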
#ifdef CONFIG_SPARC64
static void do_patch_sections(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs)
{
const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
sun4v_1insn = s;
if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
sun4v_2insn = s;
}
if (sun4v_1insn && tlb_type == hypervisor) {
void *p = (void *) sun4v_1insn->sh_addr;
sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
}
if (sun4v_2insn && tlb_type == hypervisor) {
void *p = (void *) sun4v_2insn->sh_addr;
sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
}
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
do_patch_sections(hdr, sechdrs);
/* Cheetah's I-cache is fully coherent, so only Spitfire needs the flush below. */
if (tlb_type == spitfire) {
unsigned long va;
flushw_all();
for (va = 0; va < (PAGE_SIZE << 1); va += 32)
spitfire_put_icache_tag(va, 0x0);
__asm__ __volatile__("flush %g6");
}
return 0;
}
#endif /* CONFIG_SPARC64 */
| linux-master | arch/sparc/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* pcic.c: MicroSPARC-IIep PCI controller support
*
* Copyright (C) 1998 V. Roganov and G. Raiko
*
* Code is derived from Ultra/PCI PSYCHO controller support, see that
* for author info.
*
* Support for diverse IIep based platforms by Pete Zaitcev.
* CP-1200 by Eric Brower.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <asm/swift.h> /* for cache flushing. */
#include <asm/io.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <asm/irq.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/pcic.h>
#include <asm/timex.h>
#include <asm/timer.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
#include "kernel.h"
#include "irq.h"
/*
* I studied different documents and many live PROMs both from 2.30
* family and 3.xx versions. I came to the amazing conclusion: there is
* absolutely no way to route interrupts in IIep systems relying on
* information which PROM presents. We must hardcode interrupt routing
* schematics. And this actually sucks. -- zaitcev 1999/05/12
*
* To find irq for a device we determine which routing map
* is in effect or, in other words, on which machine we are running.
* We use PROM name for this although other techniques may be used
* in special cases (Gleb reports a PROMless IIep based system).
* Once we know the map we take device configuration address and
* find PCIC pin number where INT line goes. Then we may either program
* preferred irq into the PCIC or supply the preexisting irq to the device.
*/
struct pcic_ca2irq {
unsigned char busno; /* PCI bus number */
unsigned char devfn; /* Configuration address */
unsigned char pin; /* PCIC external interrupt pin */
unsigned char irq; /* Preferred IRQ (mappable in PCIC) */
unsigned int force; /* Enforce preferred IRQ */
};
struct pcic_sn2list {
char *sysname;
struct pcic_ca2irq *intmap;
int mapdim;
};
/*
* JavaEngine-1 apparently has different versions.
*
* According to communications with Sun folks, for P2 build 501-4628-03:
* pin 0 - parallel, audio;
* pin 1 - Ethernet;
* pin 2 - su;
* pin 3 - PS/2 kbd and mouse.
*
* OEM manual (805-1486):
* pin 0: Ethernet
* pin 1: All EBus
* pin 2: IGA (unused)
* pin 3: Not connected
* OEM manual says that 501-4628 & 501-4811 are the same thing,
* only the latter has NAND flash in place.
*
* So far unofficial Sun wins over the OEM manual. Poor OEMs...
*/
static struct pcic_ca2irq pcic_i_je1a[] = { /* 501-4811-03 */
{ 0, 0x00, 2, 12, 0 }, /* EBus: hogs all */
{ 0, 0x01, 1, 6, 1 }, /* Happy Meal */
{ 0, 0x80, 0, 7, 0 }, /* IGA (unused) */
};
/* XXX JS-E entry is incomplete - PCI Slot 2 address (pin 7)? */
static struct pcic_ca2irq pcic_i_jse[] = {
{ 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */
{ 0, 0x01, 1, 6, 0 }, /* hme */
{ 0, 0x08, 2, 9, 0 }, /* VGA - we hope not used :) */
{ 0, 0x10, 6, 8, 0 }, /* PCI INTA# in Slot 1 */
{ 0, 0x18, 7, 12, 0 }, /* PCI INTA# in Slot 2, shared w. RTC */
{ 0, 0x38, 4, 9, 0 }, /* All ISA devices. Read 8259. */
{ 0, 0x80, 5, 11, 0 }, /* EIDE */
/* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */
{ 0, 0xA0, 4, 9, 0 }, /* USB */
/*
* Some pins belong to non-PCI devices, we hardcode them in drivers.
* sun4m timers - irq 10, 14
* PC style RTC - pin 7, irq 4 ?
* Smart card, Parallel - pin 4 shared with USB, ISA
* audio - pin 3, irq 5 ?
*/
};
/* SPARCengine-6 was the original release name of CP1200.
* The documentation differs between the two versions.
*/
static struct pcic_ca2irq pcic_i_se6[] = {
{ 0, 0x08, 0, 2, 0 }, /* SCSI */
{ 0, 0x01, 1, 6, 0 }, /* HME */
{ 0, 0x00, 3, 13, 0 }, /* EBus */
};
/*
* Krups (courtesy of Varol Kaptan)
* No documentation available, but it was easy to guess
* because it was very similar to Espresso.
*
* pin 0 - kbd, mouse, serial;
* pin 1 - Ethernet;
* pin 2 - igs (we do not use it);
* pin 3 - audio;
* pin 4,5,6 - unused;
* pin 7 - RTC (from P2 onwards as David B. says).
*/
static struct pcic_ca2irq pcic_i_jk[] = {
{ 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */
{ 0, 0x01, 1, 6, 0 }, /* hme */
};
/*
* Several entries in this list may point to the same routing map
* as several PROMs may be installed on the same physical board.
*/
#define SN2L_INIT(name, map) \
{ name, map, ARRAY_SIZE(map) }
static struct pcic_sn2list pcic_known_sysnames[] = {
SN2L_INIT("SUNW,JavaEngine1", pcic_i_je1a), /* JE1, PROM 2.32 */
SN2L_INIT("SUNW,JS-E", pcic_i_jse), /* PROLL JavaStation-E */
SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */
SN2L_INIT("SUNW,JS-NC", pcic_i_jk), /* PROLL JavaStation-NC */
SN2L_INIT("SUNW,JSIIep", pcic_i_jk), /* OBP JavaStation-NC */
{ NULL, NULL, 0 }
};
/*
* Only one PCIC per IIep,
* and since we have no SMP IIep, only one per system.
*/
static int pcic0_up;
static struct linux_pcic pcic0;
void __iomem *pcic_regs;
static volatile int pcic_speculative;
static volatile int pcic_trapped;
/* forward */
unsigned int pcic_build_device_irq(struct platform_device *op,
unsigned int real_irq);
#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3))
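/* Worked example: bus 0, devfn 0x08 (device 1, function 0), where 0x10
 * (BAR0) gives 0x80000000 | (0x08 << 8) | 0x10 == 0x80000810, the
 * classic PCI configuration address layout.
 */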
static int pcic_read_config_dword(unsigned int busno, unsigned int devfn,
int where, u32 *value)
{
struct linux_pcic *pcic;
unsigned long flags;
pcic = &pcic0;
local_irq_save(flags);
#if 0 /* does not fail here */
pcic_speculative = 1;
pcic_trapped = 0;
#endif
writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr);
#if 0 /* does not fail here */
nop();
if (pcic_trapped) {
local_irq_restore(flags);
*value = ~0;
return 0;
}
#endif
pcic_speculative = 2;
pcic_trapped = 0;
*value = readl(pcic->pcic_config_space_data + (where&4));
nop();
if (pcic_trapped) {
pcic_speculative = 0;
local_irq_restore(flags);
*value = ~0;
return 0;
}
pcic_speculative = 0;
local_irq_restore(flags);
return 0;
}
static int pcic_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
unsigned int v;
if (bus->number != 0) return -EINVAL;
switch (size) {
case 1:
pcic_read_config_dword(bus->number, devfn, where&~3, &v);
*val = 0xff & (v >> (8*(where & 3)));
return 0;
case 2:
if (where&1) return -EINVAL;
pcic_read_config_dword(bus->number, devfn, where&~3, &v);
*val = 0xffff & (v >> (8*(where & 3)));
return 0;
case 4:
if (where&3) return -EINVAL;
pcic_read_config_dword(bus->number, devfn, where&~3, val);
return 0;
}
return -EINVAL;
}
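/* E.g. a 1-byte read at where == 0x0e fetches the dword at 0x0c and
 * shifts it right by 8 * (0x0e & 3) == 16 bits before masking with
 * 0xff; this is how sub-dword config accesses are synthesized above.
 */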
static int pcic_write_config_dword(unsigned int busno, unsigned int devfn,
int where, u32 value)
{
struct linux_pcic *pcic;
unsigned long flags;
pcic = &pcic0;
local_irq_save(flags);
writel(CONFIG_CMD(busno, devfn, where), pcic->pcic_config_space_addr);
writel(value, pcic->pcic_config_space_data + (where&4));
local_irq_restore(flags);
return 0;
}
static int pcic_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
unsigned int v;
if (bus->number != 0) return -EINVAL;
switch (size) {
case 1:
pcic_read_config_dword(bus->number, devfn, where&~3, &v);
v = (v & ~(0xff << (8*(where&3)))) |
((0xff&val) << (8*(where&3)));
return pcic_write_config_dword(bus->number, devfn, where&~3, v);
case 2:
if (where&1) return -EINVAL;
pcic_read_config_dword(bus->number, devfn, where&~3, &v);
v = (v & ~(0xffff << (8*(where&3)))) |
((0xffff&val) << (8*(where&3)));
return pcic_write_config_dword(bus->number, devfn, where&~3, v);
case 4:
if (where&3) return -EINVAL;
return pcic_write_config_dword(bus->number, devfn, where, val);
}
return -EINVAL;
}
static struct pci_ops pcic_ops = {
.read = pcic_read_config,
.write = pcic_write_config,
};
/*
* On sparc64 pcibios_init() calls pci_controller_probe().
* We want the PCIC probed a little ahead of that so that the
* interrupt controller is operational.
*/
int __init pcic_probe(void)
{
struct linux_pcic *pcic;
struct linux_prom_registers regs[PROMREG_MAX];
struct linux_pbm_info* pbm;
char namebuf[64];
phandle node;
int err;
if (pcic0_up) {
prom_printf("PCIC: called twice!\n");
prom_halt();
}
pcic = &pcic0;
node = prom_getchild (prom_root_node);
node = prom_searchsiblings (node, "pci");
if (node == 0)
return -ENODEV;
/*
* Map in PCIC register set, config space, and IO base
*/
err = prom_getproperty(node, "reg", (char*)regs, sizeof(regs));
if (err == 0 || err == -1) {
prom_printf("PCIC: Error, cannot get PCIC registers "
"from PROM.\n");
prom_halt();
}
pcic0_up = 1;
pcic->pcic_res_regs.name = "pcic_registers";
pcic->pcic_regs = ioremap(regs[0].phys_addr, regs[0].reg_size);
if (!pcic->pcic_regs) {
prom_printf("PCIC: Error, cannot map PCIC registers.\n");
prom_halt();
}
pcic->pcic_res_io.name = "pcic_io";
if ((pcic->pcic_io = (unsigned long)
ioremap(regs[1].phys_addr, 0x10000)) == 0) {
prom_printf("PCIC: Error, cannot map PCIC IO Base.\n");
prom_halt();
}
pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
if ((pcic->pcic_config_space_addr =
ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == NULL) {
prom_printf("PCIC: Error, cannot map "
"PCI Configuration Space Address.\n");
prom_halt();
}
/*
* The docs say the three least significant bits of the address and data
* registers must be the same. Thus, we need to adjust the size of the
* data mapping accordingly.
*/
pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
if ((pcic->pcic_config_space_data =
ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == NULL) {
prom_printf("PCIC: Error, cannot map "
"PCI Configuration Space Data.\n");
prom_halt();
}
pbm = &pcic->pbm;
pbm->prom_node = node;
prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
strcpy(pbm->prom_name, namebuf);
{
extern int pcic_nmi_trap_patch[4];
t_nmi[0] = pcic_nmi_trap_patch[0];
t_nmi[1] = pcic_nmi_trap_patch[1];
t_nmi[2] = pcic_nmi_trap_patch[2];
t_nmi[3] = pcic_nmi_trap_patch[3];
swift_flush_dcache();
pcic_regs = pcic->pcic_regs;
}
prom_getstring(prom_root_node, "name", namebuf, 63); namebuf[63] = 0;
{
struct pcic_sn2list *p;
for (p = pcic_known_sysnames; p->sysname != NULL; p++) {
if (strcmp(namebuf, p->sysname) == 0)
break;
}
pcic->pcic_imap = p->intmap;
pcic->pcic_imdim = p->mapdim;
}
if (pcic->pcic_imap == NULL) {
/*
* We do not panic here for the sake of embedded systems.
*/
printk("PCIC: System %s is unknown, cannot route interrupts\n",
namebuf);
}
return 0;
}
static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
{
struct linux_pbm_info *pbm = &pcic->pbm;
pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm);
if (!pbm->pci_bus)
return;
#if 0 /* deadwood transplanted from sparc64 */
pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
pci_record_assignments(pbm, pbm->pci_bus);
pci_assign_unassigned(pbm, pbm->pci_bus);
pci_fixup_irq(pbm, pbm->pci_bus);
#endif
pci_bus_add_devices(pbm->pci_bus);
}
/*
* Main entry point from the PCI subsystem.
*/
static int __init pcic_init(void)
{
struct linux_pcic *pcic;
/*
* The PCIC has to be initialized before the timer is started.
* So, here we only report the presence of the PCIC and do some magic passes.
*/
if(!pcic0_up)
return 0;
pcic = &pcic0;
/*
* Switch off IOTLB translation.
*/
writeb(PCI_DVMA_CONTROL_IOTLB_DISABLE,
pcic->pcic_regs+PCI_DVMA_CONTROL);
/*
* Increase mapped size for PCI memory space (DMA access).
* This must be done in that order (size first, address second).
* Why couldn't we just set up 4GB and forget about it? XXX
*/
writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0);
writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY,
pcic->pcic_regs+PCI_BASE_ADDRESS_0);
pcic_pbm_scan_bus(pcic);
return 0;
}
int pcic_present(void)
{
return pcic0_up;
}
static int pdev_to_pnode(struct linux_pbm_info *pbm, struct pci_dev *pdev)
{
struct linux_prom_pci_registers regs[PROMREG_MAX];
int err;
phandle node = prom_getchild(pbm->prom_node);
while(node) {
err = prom_getproperty(node, "reg",
(char *)®s[0], sizeof(regs));
if(err != 0 && err != -1) {
unsigned long devfn = (regs[0].which_io >> 8) & 0xff;
if(devfn == pdev->devfn)
return node;
}
node = prom_getsibling(node);
}
return 0;
}
static inline struct pcidev_cookie *pci_devcookie_alloc(void)
{
return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC);
}
static void pcic_map_pci_device(struct linux_pcic *pcic,
struct pci_dev *dev, int node)
{
char namebuf[64];
unsigned long address;
unsigned long flags;
int j;
if (node == 0 || node == -1) {
strcpy(namebuf, "???");
} else {
prom_getstring(node, "name", namebuf, 63); namebuf[63] = 0;
}
for (j = 0; j < 6; j++) {
address = dev->resource[j].start;
if (address == 0) break; /* are sequential */
flags = dev->resource[j].flags;
if ((flags & IORESOURCE_IO) != 0) {
if (address < 0x10000) {
/*
* A device responds to I/O cycles on PCI.
* We generate these cycles with memory
* access into the fixed map (phys 0x30000000).
*
* Since a device driver does not want to
* do ioremap() before accessing PC-style I/O,
* we supply a virtual, ready-to-access address.
*
* Note that request_region()
* works for these devices.
*
* XXX Neat trick, but it's a *bad* idea
* to stomp on regions like that.
* What if we want to allocate one more
* PCI base address...
*/
dev->resource[j].start =
pcic->pcic_io + address;
dev->resource[j].end = 1; /* XXX */
dev->resource[j].flags =
(flags & ~IORESOURCE_IO) | IORESOURCE_MEM;
} else {
/*
* OOPS... PCI Spec allows this. Sun does
* not have any devices getting above 64K
* so it must be a user with a weird I/O
* board in a PCI slot. We must remap it
* under 64K, but that is not done yet. XXX
*/
pci_info(dev, "PCIC: Skipping I/O space at "
"0x%lx, this will Oops if a driver "
"attaches device '%s'\n", address,
namebuf);
}
}
}
}
static void
pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
{
struct pcic_ca2irq *p;
unsigned int real_irq;
int i, ivec;
char namebuf[64];
if (node == 0 || node == -1) {
strcpy(namebuf, "???");
} else {
prom_getstring(node, "name", namebuf, sizeof(namebuf));
}
if ((p = pcic->pcic_imap) == NULL) {
dev->irq = 0;
return;
}
for (i = 0; i < pcic->pcic_imdim; i++) {
if (p->busno == dev->bus->number && p->devfn == dev->devfn)
break;
p++;
}
if (i >= pcic->pcic_imdim) {
pci_info(dev, "PCIC: device %s not found in %d\n", namebuf,
pcic->pcic_imdim);
dev->irq = 0;
return;
}
i = p->pin;
if (i >= 0 && i < 4) {
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
real_irq = ivec >> (i << 2) & 0xF;
} else if (i >= 4 && i < 8) {
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
real_irq = ivec >> ((i-4) << 2) & 0xF;
} else { /* Corrupted map */
pci_info(dev, "PCIC: BAD PIN %d\n", i); for (;;) {}
}
/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
/* real_irq means PROM did not bother to program the upper
* half of PCIC. This happens on JS-E with PROM 3.11, for instance.
*/
if (real_irq == 0 || p->force) {
if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
}
pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq,
p->pin);
real_irq = p->irq;
i = p->pin;
if (i >= 4) {
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
ivec &= ~(0xF << ((i - 4) << 2));
ivec |= p->irq << ((i - 4) << 2);
writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_HI);
} else {
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
ivec &= ~(0xF << (i << 2));
ivec |= p->irq << (i << 2);
writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO);
}
}
dev->irq = pcic_build_device_irq(NULL, real_irq);
}
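/* For reference: each of the eight PCIC interrupt pins owns a 4-bit
 * field in the PCI_INT_SELECT registers, pins 0-3 in _LO and pins 4-7
 * in _HI, so pin i is read or programmed at bit offset (i % 4) * 4,
 * exactly as pcic_fill_irq() does above.
 */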
/*
* Normally called from {do_}pci_scan_bus...
*/
void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
struct linux_pcic *pcic;
/* struct linux_pbm_info* pbm = &pcic->pbm; */
int node;
struct pcidev_cookie *pcp;
if (!pcic0_up) {
pci_info(bus, "pcibios_fixup_bus: no PCIC\n");
return;
}
pcic = &pcic0;
/*
* Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus);
*/
if (bus->number != 0) {
pci_info(bus, "pcibios_fixup_bus: nonzero bus 0x%x\n",
bus->number);
return;
}
list_for_each_entry(dev, &bus->devices, bus_list) {
node = pdev_to_pnode(&pcic->pbm, dev);
if(node == 0)
node = -1;
/* cookies */
pcp = pci_devcookie_alloc();
pcp->pbm = &pcic->pbm;
pcp->prom_node = of_find_node_by_phandle(node);
dev->sysdata = pcp;
/* fixing I/O to look like memory */
if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE)
pcic_map_pci_device(pcic, dev, node);
pcic_fill_irq(pcic, dev, node);
}
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
struct resource *res;
u16 cmd, oldcmd;
int i;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
oldcmd = cmd;
pci_dev_for_each_resource(dev, res, i) {
/* Only set up the requested stuff */
if (!(mask & (1<<i)))
continue;
if (res->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (res->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != oldcmd) {
pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/* Makes compiler happy */
static volatile int pcic_timer_dummy;
static void pcic_clear_clock_irq(void)
{
pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT);
}
/* CPU frequency is 100 MHz, timer increments every 4 CPU clocks */
#define USECS_PER_JIFFY (1000000 / HZ)
#define TICK_TIMER_LIMIT ((100 * 1000000 / 4) / HZ)
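/* Worked out: a 100 MHz CPU with the counter ticking every 4 clocks
 * yields a 25 MHz counter, so with HZ == 100 the limit above is
 * 25000000 / 100 == 250000 counts per jiffy.
 */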
static unsigned int pcic_cycles_offset(void)
{
u32 value, count;
value = readl(pcic0.pcic_regs + PCI_SYS_COUNTER);
count = value & ~PCI_SYS_COUNTER_OVERFLOW;
if (value & PCI_SYS_COUNTER_OVERFLOW)
count += TICK_TIMER_LIMIT;
/*
* We divide all by HZ
* to have microsecond resolution and to avoid overflow
*/
count = ((count / HZ) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / HZ);
/* Coordinate with the sparc_config.clock_rate setting */
return count * 2;
}
void __init pci_time_init(void)
{
struct linux_pcic *pcic = &pcic0;
unsigned long v;
int timer_irq, irq;
int err;
#ifndef CONFIG_SMP
/*
* The clock_rate is in SBUS dimension.
* We take into account this in pcic_cycles_offset()
*/
sparc_config.clock_rate = SBUS_CLOCK_RATE / HZ;
sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
sparc_config.features |= FEAT_L10_CLOCKSOURCE;
sparc_config.get_cycles_offset = pcic_cycles_offset;
writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT);
/* PROM should set appropriate irq */
v = readb(pcic->pcic_regs+PCI_COUNTER_IRQ);
timer_irq = PCI_COUNTER_IRQ_SYS(v);
writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
pcic->pcic_regs+PCI_COUNTER_IRQ);
irq = pcic_build_device_irq(NULL, timer_irq);
err = request_irq(irq, timer_interrupt,
IRQF_TIMER, "timer", NULL);
if (err) {
prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
prom_halt();
}
local_irq_enable();
}
#if 0
static void watchdog_reset() {
writeb(0, pcic->pcic_regs+PCI_SYS_STATUS);
}
#endif
/*
* NMI
*/
void pcic_nmi(unsigned int pend, struct pt_regs *regs)
{
pend = swab32(pend);
if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
/*
* XXX On CP-1200 PCI #SERR may happen, we do not know
* what to do about it yet.
*/
printk("Aiee, NMI pend 0x%x pc 0x%x spec %d, hanging\n",
pend, (int)regs->pc, pcic_speculative);
for (;;) { }
}
pcic_speculative = 0;
pcic_trapped = 1;
regs->pc = regs->npc;
regs->npc += 4;
}
static inline unsigned long get_irqmask(int irq_nr)
{
return 1 << irq_nr;
}
static void pcic_mask_irq(struct irq_data *data)
{
unsigned long mask, flags;
mask = (unsigned long)data->chip_data;
local_irq_save(flags);
writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
local_irq_restore(flags);
}
static void pcic_unmask_irq(struct irq_data *data)
{
unsigned long mask, flags;
mask = (unsigned long)data->chip_data;
local_irq_save(flags);
writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
local_irq_restore(flags);
}
static unsigned int pcic_startup_irq(struct irq_data *data)
{
irq_link(data->irq);
pcic_unmask_irq(data);
return 0;
}
static struct irq_chip pcic_irq = {
.name = "pcic",
.irq_startup = pcic_startup_irq,
.irq_mask = pcic_mask_irq,
.irq_unmask = pcic_unmask_irq,
};
unsigned int pcic_build_device_irq(struct platform_device *op,
unsigned int real_irq)
{
unsigned int irq;
unsigned long mask;
irq = 0;
mask = get_irqmask(real_irq);
if (mask == 0)
goto out;
irq = irq_alloc(real_irq, real_irq);
if (irq == 0)
goto out;
irq_set_chip_and_handler_name(irq, &pcic_irq,
handle_level_irq, "PCIC");
irq_set_chip_data(irq, (void *)mask);
out:
return irq;
}
static void pcic_load_profile_irq(int cpu, unsigned int limit)
{
printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
}
void __init sun4m_pci_init_IRQ(void)
{
sparc_config.build_device_irq = pcic_build_device_irq;
sparc_config.clear_clock_irq = pcic_clear_clock_irq;
sparc_config.load_profile_irq = pcic_load_profile_irq;
}
subsys_initcall(pcic_init);
| linux-master | arch/sparc/kernel/pcic.c |
// SPDX-License-Identifier: GPL-2.0
/* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999, 2007 David S. Miller ([email protected])
* Copyright (C) 1998, 1999 Eddie C. Dost ([email protected])
* Copyright (C) 1999 Jakub Jelinek ([email protected])
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/starfire.h>
#include <asm/prom.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "psycho_common.h"
#define DRIVER_NAME "psycho"
#define PFX DRIVER_NAME ": "
/* Misc. PSYCHO PCI controller register offsets and definitions. */
#define PSYCHO_CONTROL 0x0010UL
#define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/
#define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */
#define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */
#define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */
#define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */
#define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */
#define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */
#define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */
#define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */
#define PSYCHO_PCIA_CTRL 0x2000UL
#define PSYCHO_PCIB_CTRL 0x4000UL
#define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */
#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */
#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */
#define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */
#define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */
#define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */
#define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */
#define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */
#define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */
#define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */
#define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */
#define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */
/* PSYCHO error handling support. */
/* Helper function of IOMMU error checking, which checks out
* the state of the streaming buffers. The IOMMU lock is
* held when this is called.
*
* For the PCI error case we know which PBM (and thus which
* streaming buffer) caused the error, but for the uncorrectable
* error case we do not. So we always check both streaming caches.
*/
#define PSYCHO_STRBUF_CONTROL_A 0x2800UL
#define PSYCHO_STRBUF_CONTROL_B 0x4800UL
#define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
#define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
#define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
#define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
#define PSYCHO_STRBUF_FLUSH_A 0x2808UL
#define PSYCHO_STRBUF_FLUSH_B 0x4808UL
#define PSYCHO_STRBUF_FSYNC_A 0x2810UL
#define PSYCHO_STRBUF_FSYNC_B 0x4810UL
#define PSYCHO_STC_DATA_A 0xb000UL
#define PSYCHO_STC_DATA_B 0xc000UL
#define PSYCHO_STC_ERR_A 0xb400UL
#define PSYCHO_STC_ERR_B 0xc400UL
#define PSYCHO_STC_TAG_A 0xb800UL
#define PSYCHO_STC_TAG_B 0xc800UL
#define PSYCHO_STC_LINE_A 0xb900UL
#define PSYCHO_STC_LINE_B 0xc900UL
/* When an Uncorrectable Error or a PCI Error happens, we
* interrogate the IOMMU state to see if it is the cause.
*/
#define PSYCHO_IOMMU_CONTROL 0x0200UL
#define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
#define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
#define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
#define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
#define PSYCHO_IOMMU_TSBBASE 0x0208UL
#define PSYCHO_IOMMU_FLUSH 0x0210UL
#define PSYCHO_IOMMU_TAG 0xa580UL
#define PSYCHO_IOMMU_DATA 0xa600UL
/* Uncorrectable Errors. Cause of the error and the address are
* recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors
* relating to UPA interface transactions.
*/
#define PSYCHO_UE_AFSR 0x0030UL
#define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
#define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
#define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
#define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
#define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
#define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
#define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
#define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */
#define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
#define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
#define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
#define PSYCHO_UE_AFAR 0x0038UL
static irqreturn_t psycho_ue_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + PSYCHO_UE_AFSR;
unsigned long afar_reg = pbm->controller_regs + PSYCHO_UE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch uncorrectable error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear the primary/secondary error status bits. */
error_bits = afsr &
(PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR |
PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Uncorrectable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & PSYCHO_UEAFSR_PPIO) ?
"PIO" :
((error_bits & PSYCHO_UEAFSR_PDRD) ?
"DMA Read" :
((error_bits & PSYCHO_UEAFSR_PDWR) ?
"DMA Write" : "???")))));
printk("%s: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n",
pbm->name,
(afsr & PSYCHO_UEAFSR_BMSK) >> 32UL,
(afsr & PSYCHO_UEAFSR_DOFF) >> 29UL,
(afsr & PSYCHO_UEAFSR_MID) >> 24UL,
((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0));
printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: UE Secondary errors [", pbm->name);
reported = 0;
if (afsr & PSYCHO_UEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & PSYCHO_UEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & PSYCHO_UEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* Interrogate both IOMMUs for error status. */
psycho_check_iommu_error(pbm, afsr, afar, UE_ERR);
if (pbm->sibling)
psycho_check_iommu_error(pbm->sibling, afsr, afar, UE_ERR);
return IRQ_HANDLED;
}
/* Correctable Errors. */
#define PSYCHO_CE_AFSR 0x0040UL
#define PSYCHO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
#define PSYCHO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
#define PSYCHO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
#define PSYCHO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
#define PSYCHO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
#define PSYCHO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
#define PSYCHO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
#define PSYCHO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
#define PSYCHO_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
#define PSYCHO_CEAFSR_DOFF 0x00000000e0000000UL /* Double Offset */
#define PSYCHO_CEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
#define PSYCHO_CEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
#define PSYCHO_CEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
#define PSYCHO_CE_AFAR 0x0040UL
static irqreturn_t psycho_ce_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + PSYCHO_CE_AFSR;
unsigned long afar_reg = pbm->controller_regs + PSYCHO_CE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR |
PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Correctable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & PSYCHO_CEAFSR_PPIO) ?
"PIO" :
((error_bits & PSYCHO_CEAFSR_PDRD) ?
"DMA Read" :
((error_bits & PSYCHO_CEAFSR_PDWR) ?
"DMA Write" : "???")))));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
"UPA_MID[%02lx] was_block(%d)\n",
pbm->name,
(afsr & PSYCHO_CEAFSR_ESYND) >> 48UL,
(afsr & PSYCHO_CEAFSR_BMSK) >> 32UL,
(afsr & PSYCHO_CEAFSR_DOFF) >> 29UL,
(afsr & PSYCHO_CEAFSR_MID) >> 24UL,
((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0));
printk("%s: CE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: CE Secondary errors [", pbm->name);
reported = 0;
if (afsr & PSYCHO_CEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & PSYCHO_CEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & PSYCHO_CEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
/* PCI Errors. They are signalled by the PCI bus module since they
* are associated with a specific bus segment.
*/
#define PSYCHO_PCI_AFSR_A 0x2010UL
#define PSYCHO_PCI_AFSR_B 0x4010UL
#define PSYCHO_PCI_AFAR_A 0x2018UL
#define PSYCHO_PCI_AFAR_B 0x4018UL
/* XXX What about PowerFail/PowerManagement??? -DaveM */
#define PSYCHO_ECC_CTRL 0x0020
#define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
#define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
#define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE Interrupts */
static void psycho_register_error_handlers(struct pci_pbm_info *pbm)
{
struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node);
unsigned long base = pbm->controller_regs;
u64 tmp;
int err;
if (!op)
return;
/* Psycho interrupt property order is:
* 0: PCIERR INO for this PBM
* 1: UE ERR
* 2: CE ERR
* 3: POWER FAIL
* 4: SPARE HARDWARE
* 5: POWER MANAGEMENT
*/
if (op->archdata.num_irqs < 6)
return;
/* We really mean to ignore the return result here. Two
* PCI controllers share the same interrupt numbers and
* drive the same front-end hardware.
*/
err = request_irq(op->archdata.irqs[1], psycho_ue_intr, IRQF_SHARED,
"PSYCHO_UE", pbm);
err = request_irq(op->archdata.irqs[2], psycho_ce_intr, IRQF_SHARED,
"PSYCHO_CE", pbm);
/* This one, however, ought not to fail. We can just warn
* about it since the system can still operate properly even
* if this fails.
*/
err = request_irq(op->archdata.irqs[0], psycho_pcierr_intr, IRQF_SHARED,
"PSYCHO_PCIERR", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register PCIERR, "
"err=%d\n", pbm->name, err);
/* Enable UE and CE interrupts for controller. */
upa_writeq((PSYCHO_ECCCTRL_EE |
PSYCHO_ECCCTRL_UE |
PSYCHO_ECCCTRL_CE), base + PSYCHO_ECC_CTRL);
/* Enable PCI Error interrupts and clear error
* bits for each PBM.
*/
tmp = upa_readq(base + PSYCHO_PCIA_CTRL);
tmp |= (PSYCHO_PCICTRL_SERR |
PSYCHO_PCICTRL_SBH_ERR |
PSYCHO_PCICTRL_EEN);
tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
upa_writeq(tmp, base + PSYCHO_PCIA_CTRL);
tmp = upa_readq(base + PSYCHO_PCIB_CTRL);
tmp |= (PSYCHO_PCICTRL_SERR |
PSYCHO_PCICTRL_SBH_ERR |
PSYCHO_PCICTRL_EEN);
tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
upa_writeq(tmp, base + PSYCHO_PCIB_CTRL);
}
/* PSYCHO boot time probing and initialization. */
static void pbm_config_busmastering(struct pci_pbm_info *pbm)
{
u8 *addr;
/* Set cache-line size to 64 bytes, this is actually
* a nop but I do it for completeness.
*/
addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_CACHE_LINE_SIZE);
pci_config_write8(addr, 64 / sizeof(u32));
/* Set PBM latency timer to 64 PCI clocks. */
addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_LATENCY_TIMER);
pci_config_write8(addr, 64);
}
static void psycho_scan_bus(struct pci_pbm_info *pbm,
struct device *parent)
{
pbm_config_busmastering(pbm);
pbm->is_66mhz_capable = 0;
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
/* After the PCI bus scan is complete, we can register
* the error interrupt handlers.
*/
psycho_register_error_handlers(pbm);
}
#define PSYCHO_IRQ_RETRY 0x1a00UL
#define PSYCHO_PCIA_DIAG 0x2020UL
#define PSYCHO_PCIB_DIAG 0x4020UL
#define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */
#define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */
#define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */
#define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */
#define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */
#define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */
#define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */
#define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */
static void psycho_controller_hwinit(struct pci_pbm_info *pbm)
{
u64 tmp;
upa_writeq(5, pbm->controller_regs + PSYCHO_IRQ_RETRY);
/* Enable arbiter for all PCI slots. */
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_CTRL);
tmp |= PSYCHO_PCICTRL_AEN;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_CTRL);
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_CTRL);
tmp |= PSYCHO_PCICTRL_AEN;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_CTRL);
/* Disable DMA write / PIO read synchronization on
* both PCI bus segments.
* [ U2P Erratum 1243770, STP2223BGA data sheet ]
*/
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_DIAG);
tmp |= PSYCHO_PCIDIAG_DDWSYNC;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_DIAG);
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_DIAG);
tmp |= PSYCHO_PCIDIAG_DDWSYNC;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_DIAG);
}
static void psycho_pbm_strbuf_init(struct pci_pbm_info *pbm,
int is_pbm_a)
{
unsigned long base = pbm->controller_regs;
u64 control;
if (is_pbm_a) {
pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A;
pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A;
pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_A;
pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_A;
pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_A;
pbm->stc.strbuf_line_diag= base + PSYCHO_STC_LINE_A;
} else {
pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_B;
pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B;
pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B;
pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_B;
pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_B;
pbm->stc.strbuf_line_diag= base + PSYCHO_STC_LINE_B;
}
/* PSYCHO's streaming buffer lacks ctx flushing. */
pbm->stc.strbuf_ctxflush = 0;
pbm->stc.strbuf_ctxmatch_base = 0;
pbm->stc.strbuf_flushflag = (volatile unsigned long *)
((((unsigned long)&pbm->stc.__flushflag_buf[0])
+ 63UL)
& ~63UL);
pbm->stc.strbuf_flushflag_pa = (unsigned long)
__pa(pbm->stc.strbuf_flushflag);
/* Enable the streaming buffer. We have to be careful
* just in case OBP left it with LRU locking enabled.
*
* It is possible to control whether the PBM will be rerun on
* line misses. Currently I just retain whatever setting
* OBP left us with. All checks so far show it having
* a value of zero.
*/
#undef PSYCHO_STRBUF_RERUN_ENABLE
#undef PSYCHO_STRBUF_RERUN_DISABLE
control = upa_readq(pbm->stc.strbuf_control);
control |= PSYCHO_STRBUF_CTRL_ENAB;
control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
#ifdef PSYCHO_STRBUF_RERUN_ENABLE
control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
#else
#ifdef PSYCHO_STRBUF_RERUN_DISABLE
control |= PSYCHO_STRBUF_CTRL_RRDIS;
#endif
#endif
upa_writeq(control, pbm->stc.strbuf_control);
pbm->stc.strbuf_enabled = 1;
}
#define PSYCHO_IOSPACE_A 0x002000000UL
#define PSYCHO_IOSPACE_B 0x002010000UL
#define PSYCHO_IOSPACE_SIZE 0x00000ffffUL
#define PSYCHO_MEMSPACE_A 0x100000000UL
#define PSYCHO_MEMSPACE_B 0x180000000UL
#define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL
static void psycho_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, int is_pbm_a)
{
psycho_pbm_init_common(pbm, op, "PSYCHO", PBM_CHIP_TYPE_PSYCHO);
psycho_pbm_strbuf_init(pbm, is_pbm_a);
psycho_scan_bus(pbm, &op->dev);
}
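/* The two PBMs of a PSYCHO controller share one UPA port ID.  If a
 * PBM with the same portid has already been probed it is our sibling
 * and we will share its IOMMU.
 */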
static struct pci_pbm_info *psycho_find_sibling(u32 upa_portid)
{
struct pci_pbm_info *pbm;
for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
if (pbm->portid == upa_portid)
return pbm;
}
return NULL;
}
#define PSYCHO_CONFIGSPACE 0x001000000UL
static int psycho_probe(struct platform_device *op)
{
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
int is_pbm_a, err;
u32 upa_portid;
upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
goto out_err;
}
pbm->sibling = psycho_find_sibling(upa_portid);
if (pbm->sibling) {
iommu = pbm->sibling->iommu;
} else {
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
goto out_free_controller;
}
}
pbm->iommu = iommu;
pbm->portid = upa_portid;
pr_regs = of_get_property(dp, "reg", NULL);
err = -ENODEV;
if (!pr_regs) {
printk(KERN_ERR PFX "No reg property.\n");
goto out_free_iommu;
}
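/* PBM A's per-bus registers sit at offset 0x2000 and PBM B's at
 * 0x4000 (see the AFSR/AFAR defines above); the first reg property
 * encodes which of the two this node is.
 */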
is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
pbm->controller_regs = pr_regs[2].phys_addr;
pbm->config_space = (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
if (is_pbm_a) {
pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_A;
pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_A;
pbm->pci_csr = pbm->controller_regs + PSYCHO_PCIA_CTRL;
} else {
pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_B;
pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_B;
pbm->pci_csr = pbm->controller_regs + PSYCHO_PCIB_CTRL;
}
psycho_controller_hwinit(pbm);
if (!pbm->sibling) {
err = psycho_iommu_init(pbm, 128, 0xc0000000,
0xffffffff, PSYCHO_CONTROL);
if (err)
goto out_free_iommu;
/* If necessary, hook us up for starfire IRQ translations. */
if (this_is_starfire)
starfire_hookup(pbm->portid);
}
psycho_pbm_init(pbm, op, is_pbm_a);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
if (pbm->sibling)
pbm->sibling->sibling = pbm;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
if (!pbm->sibling)
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id psycho_match[] = {
{
.name = "pci",
.compatible = "pci108e,8000",
},
{},
};
static struct platform_driver psycho_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = psycho_match,
},
.probe = psycho_probe,
};
static int __init psycho_init(void)
{
return platform_driver_register(&psycho_driver);
}
subsys_initcall(psycho_init);
| linux-master | arch/sparc/kernel/pci_psycho.c |
// SPDX-License-Identifier: GPL-2.0
/* leon_pmc.c: LEON Power-down cpu_idle() handler
*
* Copyright (C) 2011 Daniel Hellstrom ([email protected]) Aeroflex Gaisler AB
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <asm/leon_amba.h>
#include <asm/cpu_type.h>
#include <asm/leon.h>
#include <asm/processor.h>
/* List of Systems that need fixup instructions around power-down instruction */
static unsigned int pmc_leon_fixup_ids[] = {
AEROFLEX_UT699,
GAISLER_GR712RC,
LEON4_NEXTREME1,
0
};
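/* Returns 1 when the running chip is one of the systems listed in
 * pmc_leon_fixup_ids and therefore needs the extra instructions
 * around the power-down instruction.
 */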
static int pmc_leon_need_fixup(void)
{
unsigned int systemid = amba_system_id >> 16;
unsigned int *id;
id = &pmc_leon_fixup_ids[0];
while (*id != 0) {
if (*id == systemid)
return 1;
id++;
}
return 0;
}
/*
* CPU idle callback function for systems that need some extra handling
* See .../arch/sparc/kernel/process.c
*/
static void pmc_leon_idle_fixup(void)
{
/* Prepare an address to a non-cachable region.  APB is always
 * non-cachable.  One instruction is executed after the sleep
 * instruction; we make sure to read the bus and throw away the
 * value by accessing a non-cachable area.  We also make sure the
 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
 */
register unsigned int address = (unsigned int)leon3_irqctrl_regs;
/* Interrupts need to be enabled to not hang the CPU */
raw_local_irq_enable();
__asm__ __volatile__ (
"wr %%g0, %%asr19\n"
"lda [%0] %1, %%g0\n"
:
: "r"(address), "i"(ASI_LEON_BYPASS));
raw_local_irq_disable();
}
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
static void pmc_leon_idle(void)
{
/* Interrupts need to be enabled to not hang the CPU */
raw_local_irq_enable();
/* For systems without power-down, this will be a no-op */
__asm__ __volatile__ ("wr %g0, %asr19\n\t");
raw_local_irq_disable();
}
/* Install LEON Power Down function */
static int __init leon_pmc_install(void)
{
if (sparc_cpu_model == sparc_leon) {
/* Assign power management IDLE handler */
if (pmc_leon_need_fixup())
sparc_idle = pmc_leon_idle_fixup;
else
sparc_idle = pmc_leon_idle;
printk(KERN_INFO "leon: power management initialized\n");
}
return 0;
}
/* This driver is not critical to the boot process; it does not
 * matter if it is initialized late.
*/
late_initcall(leon_pmc_install);
| linux-master | arch/sparc/kernel/leon_pmc.c |
// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
*
* Copyright (C) 2006, 2007, 2008 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-map-ops.h>
#include <asm/iommu-common.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"
#include "pci_sun4v.h"
#define DRIVER_NAME "pci_sun4v"
#define PFX DRIVER_NAME ": "
static unsigned long vpci_major;
static unsigned long vpci_minor;
struct vpci_version {
unsigned long major;
unsigned long minor;
};
/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
{ .major = 2, .minor = 0 },
{ .major = 1, .minor = 1 },
};
static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch {
struct device *dev; /* Device mapping is for. */
unsigned long prot; /* IOMMU page protections */
unsigned long entry; /* Index into IOTSB. */
u64 *pglist; /* List of physical pages */
unsigned long npages; /* Number of pages in list. */
};
static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;
/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p->dev = dev;
p->prot = prot;
p->entry = entry;
p->npages = 0;
}
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
return iommu->atu && mask > DMA_BIT_MASK(32);
}
/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
u64 *pglist = p->pglist;
u64 index_count;
unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
unsigned long npages = p->npages;
unsigned long iotsb_num;
unsigned long ret;
long num;
/* VPCI maj=1, min=[0,1] only supports read and write */
if (vpci_major < 2)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
if (!iommu_use_atu(pbm->iommu, mask)) {
num = pci_sun4v_iommu_map(devhandle,
HV_PCI_TSBID(0, entry),
npages,
prot,
__pa(pglist));
if (unlikely(num < 0)) {
pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
__func__,
devhandle,
HV_PCI_TSBID(0, entry),
npages, prot, __pa(pglist),
num);
return -1;
}
} else {
index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
ret = pci_sun4v_iotsb_map(devhandle,
iotsb_num,
index_count,
prot,
__pa(pglist),
&num);
if (unlikely(ret != HV_EOK)) {
pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
__func__,
devhandle, iotsb_num,
index_count, prot,
__pa(pglist), ret);
return -1;
}
}
entry += num;
npages -= num;
pglist += num;
}
p->entry = entry;
p->npages = 0;
return 0;
}
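/* Start mapping at a new IOTSB entry.  If 'entry' simply continues
 * the current batch we keep accumulating pages; otherwise flush
 * whatever has been queued so far before switching.
 */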
static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
if (p->entry + p->npages == entry)
return;
if (p->entry != ~0UL)
iommu_batch_flush(p, mask);
p->entry = entry;
}
/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
p->pglist[p->npages++] = phys_page;
if (p->npages == PGLIST_NENTS)
return iommu_batch_flush(p, mask);
return 0;
}
/* Interrupts must be disabled. */
static inline long iommu_batch_end(u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
return iommu_batch_flush(p, mask);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs)
{
u64 mask;
unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0;
struct iommu *iommu;
struct iommu_map_table *tbl;
struct page *page;
void *ret;
long entry;
int nid;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (unlikely(order > MAX_ORDER))
return NULL;
npages = size >> IO_PAGE_SHIFT;
if (attrs & DMA_ATTR_WEAK_ORDERING)
prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
return NULL;
first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
mask = dev->coherent_dma_mask;
if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &iommu->atu->tbl;
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto range_alloc_fail;
*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
local_irq_save(flags);
iommu_batch_start(dev,
(HV_PCI_MAP_ATTR_READ | prot |
HV_PCI_MAP_ATTR_WRITE),
entry);
for (n = 0; n < npages; n++) {
long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
return ret;
iommu_map_fail:
local_irq_restore(flags);
iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
unsigned long iotsb_num,
struct pci_bus *bus_dev)
{
struct pci_dev *pdev;
unsigned long err;
unsigned int bus;
unsigned int device;
unsigned int fun;
list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
if (pdev->subordinate) {
/* No need to bind pci bridge */
dma_4v_iotsb_bind(devhandle, iotsb_num,
pdev->subordinate);
} else {
bus = bus_dev->number;
device = PCI_SLOT(pdev->devfn);
fun = PCI_FUNC(pdev->devfn);
err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
HV_PCI_DEVICE_BUILD(bus,
device,
fun));
/* If bind fails for one device it is going to fail
 * for the rest of the devices because we are sharing
 * the IOTSB.  So in case of failure simply return with
 * the error.
 */
if (err)
return err;
}
}
return 0;
}
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
dma_addr_t dvma, unsigned long iotsb_num,
unsigned long entry, unsigned long npages)
{
unsigned long num, flags;
unsigned long ret;
local_irq_save(flags);
do {
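/* DVMA addresses at or below 4GB belong to the legacy
 * IOMMU; anything above comes from the ATU IOTSB.
 */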
if (dvma <= DMA_BIT_MASK(32)) {
num = pci_sun4v_iommu_demap(devhandle,
HV_PCI_TSBID(0, entry),
npages);
} else {
ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
entry, npages, &num);
if (unlikely(ret != HV_EOK)) {
pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
ret);
}
}
entry += num;
npages -= num;
} while (npages != 0);
local_irq_restore(flags);
}
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
dma_addr_t dvma, unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl;
unsigned long order, npages, entry;
unsigned long iotsb_num;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
atu = iommu->atu;
devhandle = pbm->devhandle;
if (!iommu_use_atu(iommu, dvma)) {
tbl = &iommu->tbl;
iotsb_num = 0; /* we don't care for legacy iommu */
} else {
tbl = &atu->tbl;
iotsb_num = atu->iotsb->iotsb_num;
}
entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl;
u64 mask;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr;
unsigned long prot;
dma_addr_t bus_addr, ret;
long entry;
iommu = dev->archdata.iommu;
atu = iommu->atu;
if (unlikely(direction == DMA_NONE))
goto bad;
oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
mask = *dev->dma_mask;
if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto bad;
bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
if (attrs & DMA_ATTR_WEAK_ORDERING)
prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
local_irq_save(flags);
iommu_batch_start(dev, prot, entry);
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
long err = iommu_batch_add(base_paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
return ret;
bad:
if (printk_ratelimit())
WARN_ON(1);
return DMA_MAPPING_ERROR;
iommu_map_fail:
local_irq_restore(flags);
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
return DMA_MAPPING_ERROR;
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl;
unsigned long npages;
unsigned long iotsb_num;
long entry;
u32 devhandle;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
atu = iommu->atu;
devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
if (bus_addr <= DMA_BIT_MASK(32)) {
iotsb_num = 0; /* we don't care for legacy iommu */
tbl = &iommu->tbl;
} else {
iotsb_num = atu->iotsb->iotsb_num;
tbl = &atu->tbl;
}
entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl;
u64 mask;
unsigned long base_shift;
long err;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
if (nelems == 0 || !iommu)
return -EINVAL;
atu = iommu->atu;
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
if (attrs & DMA_ATTR_WEAK_ORDERING)
prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
outs->dma_length = 0;
local_irq_save(flags);
iommu_batch_start(dev, prot, ~0UL);
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
mask = *dev->dma_mask;
if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;
base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
slen = s->length;
/* Sanity check */
if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
entry = iommu_tbl_range_alloc(dev, tbl, npages,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
tbl, paddr, npages);
goto iommu_map_failed;
}
iommu_batch_new_entry(entry, mask);
/* Convert entry to a dma_addr_t */
dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
err = iommu_batch_add(paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
}
/* If we are in an open segment, try merging */
if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size) ||
(is_span_boundary(out_entry, base_shift,
seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
}
}
if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
err = iommu_batch_end(mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
local_irq_restore(flags);
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_length = 0;
}
return outcount;
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
s->dma_length = 0;
}
if (s == outs)
break;
}
local_irq_restore(flags);
return -EINVAL;
}
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
struct atu *atu;
unsigned long flags, entry;
unsigned long iotsb_num;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
atu = iommu->atu;
devhandle = pbm->devhandle;
local_irq_save(flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages;
struct iommu_map_table *tbl;
unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
if (dma_handle <= DMA_BIT_MASK(32)) {
iotsb_num = 0; /* we don't care for legacy iommu */
tbl = &iommu->tbl;
} else {
iotsb_num = atu->iotsb->iotsb_num;
tbl = &atu->tbl;
}
entry = ((dma_handle - tbl->table_map_base) >> shift);
dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
entry, npages);
iommu_tbl_range_free(tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
local_irq_restore(flags);
}
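/* A device can use this DMA engine only if its mask covers the whole
 * DVMA range we hand out (modulo the ALi sound DMA workaround).
 */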
static int dma_4v_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
if (ali_sound_dma_hack(dev, device_mask))
return 1;
if (device_mask < iommu->dma_addr_mask)
return 0;
return 1;
}
static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
.map_page = dma_4v_map_page,
.unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
.dma_supported = dma_4v_supported,
};
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
struct property *prop;
struct device_node *dp;
dp = pbm->op->dev.of_node;
prop = of_find_property(dp, "66mhz-capable", NULL);
pbm->is_66mhz_capable = (prop != NULL);
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
/* XXX register error interrupt handlers XXX */
}
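/* Scan the IOMMU TSB for mappings left behind by OBP.  Entries that
 * point into memory the kernel owns are torn down; the rest are kept
 * and marked busy in the allocation map so we never hand them out.
 */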
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
struct iommu_map_table *iommu)
{
struct iommu_pool *pool;
unsigned long i, pool_nr, cnt = 0;
u32 devhandle;
devhandle = pbm->devhandle;
for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
pool = &(iommu->pools[pool_nr]);
for (i = pool->start; i <= pool->end; i++) {
unsigned long ret, io_attrs, ra;
ret = pci_sun4v_iommu_getmap(devhandle,
HV_PCI_TSBID(0, i),
&io_attrs, &ra);
if (ret == HV_EOK) {
if (page_in_phys_avail(ra)) {
pci_sun4v_iommu_demap(devhandle,
HV_PCI_TSBID(0,
i), 1);
} else {
cnt++;
__set_bit(i, iommu->map);
}
}
}
}
return cnt;
}
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
struct atu *atu = pbm->iommu->atu;
struct atu_iotsb *iotsb;
void *table;
u64 table_size;
u64 iotsb_num;
unsigned long order;
unsigned long err;
iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
if (!iotsb) {
err = -ENOMEM;
goto out_err;
}
atu->iotsb = iotsb;
/* calculate size of IOTSB */
table_size = (atu->size / IO_PAGE_SIZE) * 8;
order = get_order(table_size);
table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!table) {
err = -ENOMEM;
goto table_failed;
}
iotsb->table = table;
iotsb->ra = __pa(table);
iotsb->dvma_size = atu->size;
iotsb->dvma_base = atu->base;
iotsb->table_size = table_size;
iotsb->page_size = IO_PAGE_SIZE;
/* configure and register IOTSB with HV */
err = pci_sun4v_iotsb_conf(pbm->devhandle,
iotsb->ra,
iotsb->table_size,
iotsb->page_size,
iotsb->dvma_base,
&iotsb_num);
if (err) {
pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
goto iotsb_conf_failed;
}
iotsb->iotsb_num = iotsb_num;
err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
if (err) {
pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
goto iotsb_conf_failed;
}
return 0;
iotsb_conf_failed:
free_pages((unsigned long)table, order);
table_failed:
kfree(iotsb);
out_err:
return err;
}
static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
struct atu *atu = pbm->iommu->atu;
unsigned long err;
const u64 *ranges;
u64 map_size, num_iotte;
u64 dma_mask;
const u32 *page_size;
int len;
ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
&len);
if (!ranges) {
pr_err(PFX "No iommu-address-ranges\n");
return -EINVAL;
}
page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
NULL);
if (!page_size) {
pr_err(PFX "No iommu-pagesizes\n");
return -EINVAL;
}
/* There are 4 iommu-address-ranges supported.  Each range is a pair
 * of {base, size}.  ranges[0] and ranges[1] are 32-bit address space
 * while ranges[2] and ranges[3] are 64-bit space.  We want to use the
 * 64-bit address ranges to support 64-bit addressing.  Because the
 * 'size' for ranges[2] and ranges[3] is the same we can select either
 * of them for mapping.  However, since that size is too large for the
 * OS to allocate an IOTSB, we use a fixed size of 32G
 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices
 * to share.
 */
atu->ranges = (struct atu_ranges *)ranges;
atu->base = atu->ranges[3].base;
atu->size = ATU_64_SPACE_SIZE;
/* Create IOTSB */
err = pci_sun4v_atu_alloc_iotsb(pbm);
if (err) {
pr_err(PFX "Error creating ATU IOTSB\n");
return err;
}
/* Create ATU iommu map.
* One bit represents one iotte in IOTSB table.
*/
dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
num_iotte = atu->size / IO_PAGE_SIZE;
map_size = num_iotte / 8;
atu->tbl.table_map_base = atu->base;
atu->dma_addr_mask = dma_mask;
atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
if (!atu->tbl.map)
return -ENOMEM;
iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
NULL, false /* no large_pool */,
0 /* default npools */,
false /* want span boundary checking */);
return 0;
}
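/* Set up the legacy (32-bit) IOMMU from the "virtual-dma" property,
 * falling back to a 2GB DVMA window based at 0x80000000.
 */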
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
struct iommu *iommu = pbm->iommu;
unsigned long num_tsb_entries, sz;
u32 dma_mask, dma_offset;
const u32 *vdma;
vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
if (!vdma)
vdma = vdma_default;
if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
vdma[0], vdma[1]);
return -EINVAL;
}
dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
dma_offset = vdma[0];
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */
sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL;
iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
if (!iommu->tbl.map) {
printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
NULL, false /* no large_pool */,
0 /* default npools */,
false /* want span boundary checking */);
sz = probe_existing_entries(pbm, &iommu->tbl);
if (sz)
printk("%s: Imported %lu TSB entries from OBP\n",
pbm->name, sz);
return 0;
}
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
u64 version_type;
#define MSIQ_VERSION_MASK 0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT 32
#define MSIQ_TYPE_MASK 0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT 0
#define MSIQ_TYPE_NONE 0x00
#define MSIQ_TYPE_MSG 0x01
#define MSIQ_TYPE_MSI32 0x02
#define MSIQ_TYPE_MSI64 0x03
#define MSIQ_TYPE_INTX 0x08
#define MSIQ_TYPE_NONE2 0xff
u64 intx_sysino;
u64 reserved1;
u64 stick;
u64 req_id; /* bus/device/func */
#define MSIQ_REQID_BUS_MASK 0xff00UL
#define MSIQ_REQID_BUS_SHIFT 8
#define MSIQ_REQID_DEVICE_MASK 0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT 3
#define MSIQ_REQID_FUNC_MASK 0x0007UL
#define MSIQ_REQID_FUNC_SHIFT 0
u64 msi_address;
/* The format of this value is message type dependent.
* For MSI bits 15:0 are the data from the MSI packet.
* For MSI-X bits 31:0 are the data from the MSI packet.
* For MSG, the value encodes the message code and message routing code, where:
* bits 39:32 is the bus/device/fn of the msg target-id
* bits 18:16 is the message routing code
* bits 7:0 is the message code
* For INTx the low order 2-bits are:
* 00 - INTA
* 01 - INTB
* 10 - INTC
* 11 - INTD
*/
u64 msi_data;
u64 reserved2;
};
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head)
{
unsigned long err, limit;
err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
return -ENXIO;
limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
if (unlikely(*head >= limit))
return -EFBIG;
return 0;
}
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
unsigned long msiqid, unsigned long *head,
unsigned long *msi)
{
struct pci_sun4v_msiq_entry *ep;
unsigned long err, type;
/* Note: void pointer arithmetic, 'head' is a byte offset */
ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
(pbm->msiq_ent_count *
sizeof(struct pci_sun4v_msiq_entry))) +
*head);
if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
return 0;
type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
return -EINVAL;
*msi = ep->msi_data;
err = pci_sun4v_msi_setstate(pbm->devhandle,
ep->msi_data /* msi_num */,
HV_MSISTATE_IDLE);
if (unlikely(err))
return -ENXIO;
/* Clear the entry. */
ep->version_type &= ~MSIQ_TYPE_MASK;
(*head) += sizeof(struct pci_sun4v_msiq_entry);
if (*head >=
(pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
*head = 0;
return 1;
}
static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head)
{
unsigned long err;
err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
return -EINVAL;
return 0;
}
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64)
{
if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
(is_msi64 ?
HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
return -ENXIO;
if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
return -ENXIO;
if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
return -ENXIO;
return 0;
}
static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
unsigned long err, msiqid;
err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
if (err)
return -ENXIO;
pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
return 0;
}
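/* Allocate one physically contiguous block holding all of this PBM's
 * MSI event queues, register each queue with the hypervisor, and read
 * the configuration back to verify it.
 */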
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long q_size, alloc_size, pages, order;
int i;
q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
alloc_size = (pbm->msiq_num * q_size);
order = get_order(alloc_size);
pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
if (pages == 0UL) {
printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
order);
return -ENOMEM;
}
memset((char *)pages, 0, PAGE_SIZE << order);
pbm->msi_queues = (void *) pages;
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long err, base = __pa(pages + (i * q_size));
unsigned long ret1, ret2;
err = pci_sun4v_msiq_conf(pbm->devhandle,
pbm->msiq_first + i,
base, pbm->msiq_ent_count);
if (err) {
printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
err);
goto h_error;
}
err = pci_sun4v_msiq_info(pbm->devhandle,
pbm->msiq_first + i,
&ret1, &ret2);
if (err) {
printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
err);
goto h_error;
}
if (ret1 != base || ret2 != pbm->msiq_ent_count) {
printk(KERN_ERR "MSI: Bogus qconf "
"expected[%lx:%x] got[%lx:%lx]\n",
base, pbm->msiq_ent_count,
ret1, ret2);
goto h_error;
}
}
return 0;
h_error:
free_pages(pages, order);
return -EINVAL;
}
static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
unsigned long q_size, alloc_size, pages, order;
int i;
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long msiqid = pbm->msiq_first + i;
(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
}
q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
alloc_size = (pbm->msiq_num * q_size);
order = get_order(alloc_size);
pages = (unsigned long) pbm->msi_queues;
free_pages(pages, order);
pbm->msi_queues = NULL;
}
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
if (!irq)
return -ENOMEM;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
return -EINVAL;
return irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
.get_head = pci_sun4v_get_head,
.dequeue_msi = pci_sun4v_dequeue_msi,
.set_head = pci_sun4v_set_head,
.msi_setup = pci_sun4v_msi_setup,
.msi_teardown = pci_sun4v_msi_teardown,
.msiq_alloc = pci_sun4v_msiq_alloc,
.msiq_free = pci_sun4v_msiq_free,
.msiq_build_irq = pci_sun4v_msiq_build_irq,
};
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, u32 devhandle)
{
struct device_node *dp = op->dev.of_node;
int err;
pbm->numa_node = of_node_to_nid(dp);
pbm->pci_ops = &sun4v_pci_ops;
pbm->config_space_reg_bits = 12;
pbm->index = pci_num_pbms++;
pbm->op = op;
pbm->devhandle = devhandle;
pbm->name = dp->full_name;
printk("%s: SUN4V PCI Bus Module\n", pbm->name);
printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
pci_determine_mem_io_space(pbm);
pci_get_pbm_props(pbm);
err = pci_sun4v_iommu_init(pbm);
if (err)
return err;
pci_sun4v_msi_init(pbm);
pci_sun4v_scan_bus(pbm, &op->dev);
/* If atu_init fails it is not a complete failure;
 * we can still continue using the legacy iommu.
*/
if (pbm->iommu->atu) {
err = pci_sun4v_atu_init(pbm);
if (err) {
kfree(pbm->iommu->atu);
pbm->iommu->atu = NULL;
pr_err(PFX "ATU init failed, err=%d\n", err);
}
}
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
return 0;
}
static int pci_sun4v_probe(struct platform_device *op)
{
const struct linux_prom64_registers *regs;
static int hvapi_negotiated = 0;
struct pci_pbm_info *pbm;
struct device_node *dp;
struct iommu *iommu;
struct atu *atu;
u32 devhandle;
int i, err = -ENODEV;
static bool hv_atu = true;
dp = op->dev.of_node;
if (!hvapi_negotiated++) {
for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
vpci_major = vpci_versions[i].major;
vpci_minor = vpci_versions[i].minor;
err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
&vpci_minor);
if (!err)
break;
}
if (err) {
pr_err(PFX "Could not register hvapi, err=%d\n", err);
return err;
}
pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);
err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
if (err) {
/* don't return an error if we fail to register the
* ATU group, but ATU hcalls won't be available.
*/
hv_atu = false;
} else {
pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
vatu_major, vatu_minor);
}
dma_ops = &sun4v_dma_ops;
}
regs = of_get_property(dp, "reg", NULL);
err = -ENODEV;
if (!regs) {
printk(KERN_ERR PFX "Could not find config registers\n");
goto out_err;
}
devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
err = -ENOMEM;
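/* On the first probe, allocate one page-sized physical address
 * list per possible CPU for the IOMMU mapping batcher.
 */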
if (!iommu_batch_initialized) {
for_each_possible_cpu(i) {
unsigned long page = get_zeroed_page(GFP_KERNEL);
if (!page)
goto out_err;
per_cpu(iommu_batch, i).pglist = (u64 *) page;
}
iommu_batch_initialized = 1;
}
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
goto out_err;
}
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
goto out_free_controller;
}
pbm->iommu = iommu;
iommu->atu = NULL;
if (hv_atu) {
atu = kzalloc(sizeof(*atu), GFP_KERNEL);
if (!atu)
pr_err(PFX "Could not allocate atu\n");
else
iommu->atu = atu;
}
err = pci_sun4v_pbm_init(pbm, op, devhandle);
if (err)
goto out_free_iommu;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(iommu->atu);
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id pci_sun4v_match[] = {
{
.name = "pci",
.compatible = "SUNW,sun4v-pci",
},
{},
};
static struct platform_driver pci_sun4v_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = pci_sun4v_match,
},
.probe = pci_sun4v_probe,
};
static int __init pci_sun4v_init(void)
{
return platform_driver_register(&pci_sun4v_driver);
}
subsys_initcall(pci_sun4v_init);
| linux-master | arch/sparc/kernel/pci_sun4v.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/sigcontext.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/switch_to.h>
#include "sigutil.h"
int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err = 0;
fprs = current_thread_info()->fpsaved[0];
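/* Copy out only the halves of the FPU register file that were
 * actually saved: DL covers f0-f31, DU covers f32-f63.
 */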
if (fprs & FPRS_DL)
err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
(sizeof(unsigned int) * 32));
err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
err |= __put_user(fprs, &fpu->si_fprs);
return err;
}
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err;
if (((unsigned long) fpu) & 7)
return -EFAULT;
err = get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
if (fprs & FPRS_DL)
err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
(sizeof(unsigned int) * 32));
err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
current_thread_info()->fpsaved[0] |= fprs;
return err;
}
int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
{
int i, err = __put_user(wsaved, &rwin->wsaved);
for (i = 0; i < wsaved; i++) {
struct reg_window *rp = &current_thread_info()->reg_window[i];
unsigned long fp = current_thread_info()->rwbuf_stkptrs[i];
err |= copy_to_user(&rwin->reg_window[i], rp,
sizeof(struct reg_window));
err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
}
return err;
}
int restore_rwin_state(__siginfo_rwin_t __user *rp)
{
struct thread_info *t = current_thread_info();
int i, wsaved, err;
if (((unsigned long) rp) & 7)
return -EFAULT;
get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
err = 0;
for (i = 0; i < wsaved; i++) {
err |= copy_from_user(&t->reg_window[i],
&rp->reg_window[i],
sizeof(struct reg_window));
err |= __get_user(t->rwbuf_stkptrs[i],
&rp->rwbuf_stkptrs[i]);
}
if (err)
return err;
set_thread_wsaved(wsaved);
synchronize_user_stack();
if (get_thread_wsaved())
return -EFAULT;
return 0;
}
| linux-master | arch/sparc/kernel/sigutil_64.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/termios_internal.h>
/*
* c_cc characters in the termio structure. Oh, how I love being
* backwardly compatible.  Notice that characters 4 and 5 are
* interpreted differently depending on whether ICANON is set in
* c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise
* as _VMIN and _VTIME.  This is for compatibility with OSF/1 (which
* is compatible with sysV)...
*/
#define _VMIN 4
#define _VTIME 5
int kernel_termios_to_user_termio(struct termio __user *termio,
struct ktermios *termios)
{
struct termio v;
memset(&v, 0, sizeof(struct termio));
v.c_iflag = termios->c_iflag;
v.c_oflag = termios->c_oflag;
v.c_cflag = termios->c_cflag;
v.c_lflag = termios->c_lflag;
v.c_line = termios->c_line;
memcpy(v.c_cc, termios->c_cc, NCC);
if (!(v.c_lflag & ICANON)) {
v.c_cc[_VMIN] = termios->c_cc[VMIN];
v.c_cc[_VTIME] = termios->c_cc[VTIME];
}
return copy_to_user(termio, &v, sizeof(struct termio));
}
int user_termios_to_kernel_termios(struct ktermios *k,
struct termios2 __user *u)
{
int err;
err = get_user(k->c_iflag, &u->c_iflag);
err |= get_user(k->c_oflag, &u->c_oflag);
err |= get_user(k->c_cflag, &u->c_cflag);
err |= get_user(k->c_lflag, &u->c_lflag);
err |= get_user(k->c_line, &u->c_line);
err |= copy_from_user(k->c_cc, u->c_cc, NCCS);
if (k->c_lflag & ICANON) {
err |= get_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
err |= get_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
} else {
err |= get_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
err |= get_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
}
err |= get_user(k->c_ispeed, &u->c_ispeed);
err |= get_user(k->c_ospeed, &u->c_ospeed);
return err;
}
int kernel_termios_to_user_termios(struct termios2 __user *u,
struct ktermios *k)
{
int err;
err = put_user(k->c_iflag, &u->c_iflag);
err |= put_user(k->c_oflag, &u->c_oflag);
err |= put_user(k->c_cflag, &u->c_cflag);
err |= put_user(k->c_lflag, &u->c_lflag);
err |= put_user(k->c_line, &u->c_line);
err |= copy_to_user(u->c_cc, k->c_cc, NCCS);
if (!(k->c_lflag & ICANON)) {
err |= put_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
err |= put_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
} else {
err |= put_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
err |= put_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
}
err |= put_user(k->c_ispeed, &u->c_ispeed);
err |= put_user(k->c_ospeed, &u->c_ospeed);
return err;
}
int user_termios_to_kernel_termios_1(struct ktermios *k,
struct termios __user *u)
{
int err;
err = get_user(k->c_iflag, &u->c_iflag);
err |= get_user(k->c_oflag, &u->c_oflag);
err |= get_user(k->c_cflag, &u->c_cflag);
err |= get_user(k->c_lflag, &u->c_lflag);
err |= get_user(k->c_line, &u->c_line);
err |= copy_from_user(k->c_cc, u->c_cc, NCCS);
if (k->c_lflag & ICANON) {
err |= get_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
err |= get_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
} else {
err |= get_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
err |= get_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
}
return err;
}
int kernel_termios_to_user_termios_1(struct termios __user *u,
struct ktermios *k)
{
int err;
err = put_user(k->c_iflag, &u->c_iflag);
err |= put_user(k->c_oflag, &u->c_oflag);
err |= put_user(k->c_cflag, &u->c_cflag);
err |= put_user(k->c_lflag, &u->c_lflag);
err |= put_user(k->c_line, &u->c_line);
err |= copy_to_user(u->c_cc, k->c_cc, NCCS);
if (!(k->c_lflag & ICANON)) {
err |= put_user(k->c_cc[VMIN], &u->c_cc[_VMIN]);
err |= put_user(k->c_cc[VTIME], &u->c_cc[_VTIME]);
} else {
err |= put_user(k->c_cc[VEOF], &u->c_cc[VEOF]);
err |= put_user(k->c_cc[VEOL], &u->c_cc[VEOL]);
}
return err;
}
| linux-master | arch/sparc/kernel/termios.c |
/*
* Copyright (C) 2001 Andrea Arcangeli <[email protected]> SuSE
* Copyright 2003 Andi Kleen, SuSE Labs.
*
* Thanks to [email protected] for some useful hint.
* Special thanks to Ingo Molnar for his early experience with
* a different vsyscall implementation for Linux/IA32 and for the name.
*/
#include <linux/time.h>
#include <linux/timekeeper_internal.h>
#include <asm/vvar.h>
void update_vsyscall_tz(void)
{
if (unlikely(vvar_data == NULL))
return;
vvar_data->tz_minuteswest = sys_tz.tz_minuteswest;
vvar_data->tz_dsttime = sys_tz.tz_dsttime;
}
void update_vsyscall(struct timekeeper *tk)
{
struct vvar_data *vdata = vvar_data;
if (unlikely(vdata == NULL))
return;
vvar_write_begin(vdata);
vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
vdata->clock.mask = tk->tkr_mono.mask;
vdata->clock.mult = tk->tkr_mono.mult;
vdata->clock.shift = tk->tkr_mono.shift;
vdata->wall_time_sec = tk->xtime_sec;
vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
vdata->monotonic_time_sec = tk->xtime_sec +
tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
(tk->wall_to_monotonic.tv_nsec <<
tk->tkr_mono.shift);
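/* Carry whole seconds accumulated in the shifted nanosecond
 * field over into the seconds count.
 */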
while (vdata->monotonic_time_snsec >=
(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
vdata->monotonic_time_snsec -=
((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
vdata->monotonic_time_sec++;
}
vdata->wall_time_coarse_sec = tk->xtime_sec;
vdata->wall_time_coarse_nsec =
(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
vdata->monotonic_time_coarse_sec =
vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_coarse_nsec =
vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
vdata->monotonic_time_coarse_sec++;
}
vvar_write_end(vdata);
}
| linux-master | arch/sparc/kernel/vdso.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* prom_common.c: OF device tree support common code.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc by David S. Miller [email protected]
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_pdt.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include "prom.h"
struct device_node *of_console_device;
EXPORT_SYMBOL(of_console_device);
char *of_console_path;
EXPORT_SYMBOL(of_console_path);
char *of_console_options;
EXPORT_SYMBOL(of_console_options);
int of_getintprop_default(struct device_node *np, const char *name, int def)
{
struct property *prop;
int len;
prop = of_find_property(np, name, &len);
if (!prop || len != 4)
return def;
return *(int *) prop->value;
}
EXPORT_SYMBOL(of_getintprop_default);
DEFINE_MUTEX(of_set_property_mutex);
EXPORT_SYMBOL(of_set_property_mutex);
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
struct property **prevp;
unsigned long flags;
void *new_val;
int err;
new_val = kmemdup(val, len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
raw_spin_lock_irqsave(&devtree_lock, flags);
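/* Walk the property list for a name match; update the firmware
 * copy first and only swap in the new value on success.
 */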
prevp = &dp->properties;
while (*prevp) {
struct property *prop = *prevp;
if (!strcasecmp(prop->name, name)) {
void *old_val = prop->value;
int ret;
ret = prom_setprop(dp->phandle, name, val, len);
err = -EINVAL;
if (ret >= 0) {
prop->value = new_val;
prop->length = len;
if (OF_IS_DYNAMIC(prop))
kfree(old_val);
OF_MARK_DYNAMIC(prop);
err = 0;
}
break;
}
prevp = &(*prevp)->next;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
mutex_unlock(&of_set_property_mutex);
/* XXX Update procfs if necessary... */
return err;
}
EXPORT_SYMBOL(of_set_property);
int of_find_in_proplist(const char *list, const char *match, int len)
{
while (len > 0) {
int l;
if (!strcmp(list, match))
return 1;
l = strlen(list) + 1;
list += l;
len -= l;
}
return 0;
}
EXPORT_SYMBOL(of_find_in_proplist);
/*
* SPARC32 and SPARC64's prom_nextprop() do things differently
* here, despite sharing the same interface. SPARC32 doesn't fill in 'buf',
* returning NULL on an error. SPARC64 fills in 'buf', but sets it to an
* empty string upon error.
*/
static int __init handle_nextprop_quirks(char *buf, const char *name)
{
if (!name || strlen(name) == 0)
return -1;
#ifdef CONFIG_SPARC32
strcpy(buf, name);
#endif
return 0;
}
static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
{
const char *name;
buf[0] = '\0';
name = prom_nextprop(node, prev, buf);
return handle_nextprop_quirks(buf, name);
}
unsigned int prom_early_allocated __initdata;
static struct of_pdt_ops prom_sparc_ops __initdata = {
.nextprop = prom_common_nextprop,
.getproplen = prom_getproplen,
.getproperty = prom_getproperty,
.getchild = prom_getchild,
.getsibling = prom_getsibling,
};
void __init prom_build_devicetree(void)
{
of_pdt_build_devicetree(prom_root_node, &prom_sparc_ops);
of_console_init();
pr_info("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
}
| linux-master | arch/sparc/kernel/prom_common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Procedures for creating, accessing and interpreting the device tree.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc32 by David S. Miller [email protected]
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "prom.h"
void * __init prom_early_alloc(unsigned long size)
{
void *ret;
ret = memblock_alloc(size, SMP_CACHE_BYTES);
if (!ret)
panic("%s: Failed to allocate %lu bytes\n", __func__, size);
prom_early_allocated += size;
return ret;
}
/* The following routines deal with the black magic of fully naming a
* node.
*
* Certain well known named nodes are just the simple name string.
*
* Actual devices have an address specifier appended to the base name
* string, like this "foo@addr". The "addr" can be in any number of
* formats, and the platform plus the type of the node determine the
* format and how it is constructed.
*
* For children of the ROOT node, the naming convention is fixed and
* determined by whether this is a sun4u or sun4v system.
*
* For children of other nodes, it is bus type specific. So
* we walk up the tree until we discover a "device_type" property
* we recognize and we go from there.
*/
static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *rprop;
rprop = of_find_property(dp, "reg", NULL);
if (!rprop)
return;
regs = rprop->value;
sprintf(tmp_buf, "%s@%x,%x",
name,
regs->which_io, regs->phys_addr);
}
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
name,
regs->which_io,
regs->phys_addr);
}
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
name,
devfn >> 3);
}
}
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
name,
regs->which_io, regs->phys_addr);
}
/* "name@irq,addrlo" */
static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct amba_prom_registers *regs;
unsigned int *intr;
unsigned int reg0;
struct property *prop;
int interrupt = 0;
/* In order to get a unique ID in the device tree (multiple AMBA devices
* may have the same name), the node number is printed.
*/
prop = of_find_property(dp, "reg", NULL);
if (!prop) {
reg0 = (unsigned int)dp->phandle;
} else {
regs = prop->value;
reg0 = regs->phys_addr;
}
/* Not all cores have Interrupt */
prop = of_find_property(dp, "interrupts", NULL);
if (!prop)
intr = &interrupt; /* IRQ0 does not exist */
else
intr = prop->value;
sprintf(tmp_buf, "%s@%x,%x", name, *intr, reg0);
}
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
{
struct device_node *parent = dp->parent;
if (parent != NULL) {
if (of_node_is_type(parent, "pci") ||
of_node_is_type(parent, "pciex"))
return pci_path_component(dp, tmp_buf);
if (of_node_is_type(parent, "sbus"))
return sbus_path_component(dp, tmp_buf);
if (of_node_is_type(parent, "ebus"))
return ebus_path_component(dp, tmp_buf);
if (of_node_is_type(parent, "ambapp"))
return ambapp_path_component(dp, tmp_buf);
/* "isa" is handled with platform naming */
}
/* Use platform naming convention. */
return sparc32_path_component(dp, tmp_buf);
}
char * __init build_path_component(struct device_node *dp)
{
const char *name = of_get_property(dp, "name", NULL);
char tmp_buf[64], *n;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
strcpy(tmp_buf, name);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
return n;
}
extern void restore_current(void);
void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
struct device_node *dp;
unsigned long flags;
const char *type;
phandle node;
int skip, tmp, fd;
of_console_path = prom_early_alloc(256);
switch (prom_vers) {
case PROM_V0:
skip = 0;
switch (*romvec->pv_stdout) {
case PROMDEV_SCREEN:
type = "display";
break;
case PROMDEV_TTYB:
skip = 1;
fallthrough;
case PROMDEV_TTYA:
type = "serial";
break;
default:
prom_printf("Invalid PROM_V0 stdout value %u\n",
*romvec->pv_stdout);
prom_halt();
}
tmp = skip;
for_each_node_by_type(dp, type) {
if (!tmp--)
break;
}
if (!dp) {
prom_printf("Cannot find PROM_V0 console node.\n");
prom_halt();
}
of_console_device = dp;
sprintf(of_console_path, "%pOF", dp);
if (!strcmp(type, "serial")) {
strcat(of_console_path,
(skip ? ":b" : ":a"));
}
break;
default:
case PROM_V2:
case PROM_V3:
fd = *romvec->pv_v2bootargs.fd_stdout;
spin_lock_irqsave(&prom_lock, flags);
node = (*romvec->pv_v2devops.v2_inst2pkg)(fd);
restore_current();
spin_unlock_irqrestore(&prom_lock, flags);
if (!node) {
prom_printf("Cannot resolve stdout node from "
"instance %08x.\n", fd);
prom_halt();
}
dp = of_find_node_by_phandle(node);
if (!of_node_is_type(dp, "display") &&
!of_node_is_type(dp, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
}
of_console_device = dp;
if (prom_vers == PROM_V2) {
sprintf(of_console_path, "%pOF", dp);
switch (*romvec->pv_stdout) {
case PROMDEV_TTYA:
strcat(of_console_path, ":a");
break;
case PROMDEV_TTYB:
strcat(of_console_path, ":b");
break;
}
} else {
const char *path;
dp = of_find_node_by_path("/");
path = of_get_property(dp, "stdout-path", NULL);
if (!path) {
prom_printf("No stdout-path in root node.\n");
prom_halt();
}
strcpy(of_console_path, path);
}
break;
}
of_console_options = strrchr(of_console_path, ':');
if (of_console_options) {
of_console_options++;
if (*of_console_options == '\0')
of_console_options = NULL;
}
printk(msg, of_console_path);
}
void __init of_fill_in_cpu_data(void)
{
}
void __init irq_trans_init(struct device_node *dp)
{
}
| linux-master | arch/sparc/kernel/prom_32.c |
// SPDX-License-Identifier: GPL-2.0
/* ldc.c: Logical Domain Channel link-layer protocol driver.
*
* Copyright (C) 2007, 2008 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/page.h>
#include <asm/ldc.h>
#include <asm/mdesc.h>
#define DRV_MODULE_NAME "ldc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 22, 2008"
#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT 60ULL
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
/* Packet header layout for unreliable and reliable mode frames.
* When in RAW mode, packets are simply straight 64-byte payloads
* with no headers.
*/
struct ldc_packet {
u8 type;
#define LDC_CTRL 0x01
#define LDC_DATA 0x02
#define LDC_ERR 0x10
u8 stype;
#define LDC_INFO 0x01
#define LDC_ACK 0x02
#define LDC_NACK 0x04
u8 ctrl;
#define LDC_VERS 0x01 /* Link Version */
#define LDC_RTS 0x02 /* Request To Send */
#define LDC_RTR 0x03 /* Ready To Receive */
#define LDC_RDX 0x04 /* Ready for Data eXchange */
#define LDC_CTRL_MSK 0x0f
u8 env;
#define LDC_LEN 0x3f
#define LDC_FRAG_MASK 0xc0
#define LDC_START 0x40
#define LDC_STOP 0x80
u32 seqid;
union {
u8 u_data[LDC_PACKET_SIZE - 8];
struct {
u32 pad;
u32 ackid;
u8 r_data[LDC_PACKET_SIZE - 8 - 8];
} r;
} u;
};
struct ldc_version {
u16 major;
u16 minor;
};
/* Ordered from highest major version to lowest. */
static struct ldc_version ver_arr[] = {
{ .major = 1, .minor = 0 },
};
#define LDC_DEFAULT_MTU (4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES (PAGE_SIZE / LDC_PACKET_SIZE)
struct ldc_channel;
struct ldc_mode_ops {
int (*write)(struct ldc_channel *, const void *, unsigned int);
int (*read)(struct ldc_channel *, void *, unsigned int);
};
static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
/* Protects ldc_unmap. */
spinlock_t lock;
struct ldc_mtable_entry *page_table;
struct iommu_map_table iommu_map_table;
};
struct ldc_channel {
/* Protects all operations that depend upon channel state. */
spinlock_t lock;
unsigned long id;
u8 *mssbuf;
u32 mssbuf_len;
u32 mssbuf_off;
struct ldc_packet *tx_base;
unsigned long tx_head;
unsigned long tx_tail;
unsigned long tx_num_entries;
unsigned long tx_ra;
unsigned long tx_acked;
struct ldc_packet *rx_base;
unsigned long rx_head;
unsigned long rx_tail;
unsigned long rx_num_entries;
unsigned long rx_ra;
u32 rcv_nxt;
u32 snd_nxt;
unsigned long chan_state;
struct ldc_channel_config cfg;
void *event_arg;
const struct ldc_mode_ops *mops;
struct ldc_iommu iommu;
struct ldc_version ver;
u8 hs_state;
#define LDC_HS_CLOSED 0x00
#define LDC_HS_OPEN 0x01
#define LDC_HS_GOTVERS 0x02
#define LDC_HS_SENTRTR 0x03
#define LDC_HS_GOTRTR 0x04
#define LDC_HS_COMPLETE 0x10
u8 flags;
#define LDC_FLAG_ALLOCED_QUEUES 0x01
#define LDC_FLAG_REGISTERED_QUEUES 0x02
#define LDC_FLAG_REGISTERED_IRQS 0x04
#define LDC_FLAG_RESET 0x10
u8 mss;
u8 state;
#define LDC_IRQ_NAME_MAX 32
char rx_irq_name[LDC_IRQ_NAME_MAX];
char tx_irq_name[LDC_IRQ_NAME_MAX];
struct hlist_head mh_list;
struct hlist_node list;
};
#define ldcdbg(TYPE, f, a...) \
do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
#define LDC_ABORT(lp) ldc_abort((lp), __func__)
static const char *state_to_str(u8 state)
{
switch (state) {
case LDC_STATE_INVALID:
return "INVALID";
case LDC_STATE_INIT:
return "INIT";
case LDC_STATE_BOUND:
return "BOUND";
case LDC_STATE_READY:
return "READY";
case LDC_STATE_CONNECTED:
return "CONNECTED";
default:
return "<UNKNOWN>";
}
}
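/* Advance a queue offset by one packet, wrapping back to the start of
* the ring once the end (num_entries * LDC_PACKET_SIZE) is reached.
*/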
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
off += LDC_PACKET_SIZE;
if (off == (num_entries * LDC_PACKET_SIZE))
off = 0;
return off;
}
static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
return __advance(off, lp->rx_num_entries);
}
static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
return __advance(off, lp->tx_num_entries);
}
static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
unsigned long *new_tail)
{
struct ldc_packet *p;
unsigned long t;
t = tx_advance(lp, lp->tx_tail);
if (t == lp->tx_head)
return NULL;
*new_tail = t;
p = lp->tx_base;
return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
/* When we are in reliable or stream mode, we have to track the next packet
* we haven't gotten an ACK for in the TX queue using tx_acked. We have
* to be careful not to stomp over the queue past that point. During
* the handshake, we don't have TX data packets pending in the queue
* and that's why handshake_get_tx_packet() need not be mindful of
* lp->tx_acked.
*/
static unsigned long head_for_data(struct ldc_channel *lp)
{
if (lp->cfg.mode == LDC_MODE_STREAM)
return lp->tx_acked;
return lp->tx_head;
}
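/* Check whether the TX ring has room for 'size' bytes.  Free space
* runs from the slot after tx_tail up to (but not including) the data
* head -- tx_acked in stream mode -- with the distance computed modulo
* the ring size to handle wraparound.
*/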
static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
unsigned long limit, tail, new_tail, diff;
unsigned int mss;
limit = head_for_data(lp);
tail = lp->tx_tail;
new_tail = tx_advance(lp, tail);
if (new_tail == limit)
return 0;
if (limit > new_tail)
diff = limit - new_tail;
else
diff = (limit +
((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
diff /= LDC_PACKET_SIZE;
mss = lp->mss;
if (diff * mss < size)
return 0;
return 1;
}
static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
unsigned long *new_tail)
{
struct ldc_packet *p;
unsigned long h, t;
h = head_for_data(lp);
t = tx_advance(lp, lp->tx_tail);
if (t == h)
return NULL;
*new_tail = t;
p = lp->tx_base;
return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
unsigned long orig_tail = lp->tx_tail;
int limit = 1000;
lp->tx_tail = tail;
while (limit-- > 0) {
unsigned long err;
err = sun4v_ldc_tx_set_qtail(lp->id, tail);
if (!err)
return 0;
if (err != HV_EWOULDBLOCK) {
lp->tx_tail = orig_tail;
return -EINVAL;
}
udelay(1);
}
lp->tx_tail = orig_tail;
return -EBUSY;
}
/* This just updates the head value in the hypervisor using
* a polling loop with a timeout. The caller takes care of
* updating software state representing the head change, if any.
*/
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
int limit = 1000;
while (limit-- > 0) {
unsigned long err;
err = sun4v_ldc_rx_set_qhead(lp->id, head);
if (!err)
return 0;
if (err != HV_EWOULDBLOCK)
return -EINVAL;
udelay(1);
}
return -EBUSY;
}
static int send_tx_packet(struct ldc_channel *lp,
struct ldc_packet *p,
unsigned long new_tail)
{
BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));
return set_tx_tail(lp, new_tail);
}
static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
u8 stype, u8 ctrl,
void *data, int dlen,
unsigned long *new_tail)
{
struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);
if (p) {
memset(p, 0, sizeof(*p));
p->type = LDC_CTRL;
p->stype = stype;
p->ctrl = ctrl;
if (data)
memcpy(p->u.u_data, data, dlen);
}
return p;
}
static int start_handshake(struct ldc_channel *lp)
{
struct ldc_packet *p;
struct ldc_version *ver;
unsigned long new_tail;
ver = &ver_arr[0];
ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
ver->major, ver->minor);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
ver, sizeof(*ver), &new_tail);
if (p) {
int err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->flags &= ~LDC_FLAG_RESET;
return err;
}
return -EBUSY;
}
static int send_version_nack(struct ldc_channel *lp,
u16 major, u16 minor)
{
struct ldc_packet *p;
struct ldc_version ver;
unsigned long new_tail;
ver.major = major;
ver.minor = minor;
p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
&ver, sizeof(ver), &new_tail);
if (p) {
ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
ver.major, ver.minor);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_version_ack(struct ldc_channel *lp,
struct ldc_version *vp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
vp, sizeof(*vp), &new_tail);
if (p) {
ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
vp->major, vp->minor);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rts(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
&new_tail);
if (p) {
p->env = lp->cfg.mode;
p->seqid = 0;
lp->rcv_nxt = 0;
ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
p->env, p->seqid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rtr(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
&new_tail);
if (p) {
p->env = lp->cfg.mode;
p->seqid = 0;
ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
p->env, p->seqid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_rdx(struct ldc_channel *lp)
{
struct ldc_packet *p;
unsigned long new_tail;
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
&new_tail);
if (p) {
p->env = 0;
p->seqid = ++lp->snd_nxt;
p->u.r.ackid = lp->rcv_nxt;
ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
p->env, p->seqid, p->u.r.ackid);
return send_tx_packet(lp, p, new_tail);
}
return -EBUSY;
}
static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
struct ldc_packet *p;
unsigned long new_tail;
int err;
p = data_get_tx_packet(lp, &new_tail);
if (!p)
return -EBUSY;
memset(p, 0, sizeof(*p));
p->type = data_pkt->type;
p->stype = LDC_NACK;
p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
p->seqid = lp->snd_nxt + 1;
p->u.r.ackid = lp->rcv_nxt;
ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
p->type, p->ctrl, p->seqid, p->u.r.ackid);
err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->snd_nxt++;
return err;
}
static int ldc_abort(struct ldc_channel *lp, const char *msg)
{
unsigned long hv_err;
ldcdbg(STATE, "ABORT[%s]\n", msg);
ldc_print(lp);
/* We report but do not act upon the hypervisor errors because
* there really isn't much we can do if they fail at this point.
*/
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
lp->id, hv_err);
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);
/* Refetch the RX queue state as well, because we could be invoked
* here in the queue processing context.
*/
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
printk(KERN_ERR PFX "ldc_abort: "
"sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
lp->id, hv_err);
return -ECONNRESET;
}
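/* ver_arr[] is sorted from highest major version to lowest, so the
* first entry with major <= the requested major is the best match.
*/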
static struct ldc_version *find_by_major(u16 major)
{
struct ldc_version *ret = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
struct ldc_version *v = &ver_arr[i];
if (v->major <= major) {
ret = v;
break;
}
}
return ret;
}
static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
struct ldc_version *vap;
int err;
ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
vp->major, vp->minor);
if (lp->hs_state == LDC_HS_GOTVERS) {
lp->hs_state = LDC_HS_OPEN;
memset(&lp->ver, 0, sizeof(lp->ver));
}
vap = find_by_major(vp->major);
if (!vap) {
err = send_version_nack(lp, 0, 0);
} else if (vap->major != vp->major) {
err = send_version_nack(lp, vap->major, vap->minor);
} else {
struct ldc_version ver = *vp;
if (ver.minor > vap->minor)
ver.minor = vap->minor;
err = send_version_ack(lp, &ver);
if (!err) {
lp->ver = ver;
lp->hs_state = LDC_HS_GOTVERS;
}
}
if (err)
return LDC_ABORT(lp);
return 0;
}
static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
vp->major, vp->minor);
if (lp->hs_state == LDC_HS_GOTVERS) {
if (lp->ver.major != vp->major ||
lp->ver.minor != vp->minor)
return LDC_ABORT(lp);
} else {
lp->ver = *vp;
lp->hs_state = LDC_HS_GOTVERS;
}
if (send_rts(lp))
return LDC_ABORT(lp);
return 0;
}
static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
struct ldc_version *vap;
struct ldc_packet *p;
unsigned long new_tail;
if (vp->major == 0 && vp->minor == 0)
return LDC_ABORT(lp);
vap = find_by_major(vp->major);
if (!vap)
return LDC_ABORT(lp);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
vap, sizeof(*vap),
&new_tail);
if (!p)
return LDC_ABORT(lp);
return send_tx_packet(lp, p, new_tail);
}
static int process_version(struct ldc_channel *lp,
struct ldc_packet *p)
{
struct ldc_version *vp;
vp = (struct ldc_version *) p->u.u_data;
switch (p->stype) {
case LDC_INFO:
return process_ver_info(lp, vp);
case LDC_ACK:
return process_ver_ack(lp, vp);
case LDC_NACK:
return process_ver_nack(lp, vp);
default:
return LDC_ABORT(lp);
}
}
static int process_rts(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
p->stype, p->seqid, p->env);
if (p->stype != LDC_INFO ||
lp->hs_state != LDC_HS_GOTVERS ||
p->env != lp->cfg.mode)
return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_SENTRTR;
if (send_rtr(lp))
return LDC_ABORT(lp);
return 0;
}
static int process_rtr(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
p->stype, p->seqid, p->env);
if (p->stype != LDC_INFO ||
p->env != lp->cfg.mode)
return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
send_rdx(lp);
return LDC_EVENT_UP;
}
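/* Sequence IDs must advance by exactly one per packet; any other
* delta means frames were lost or duplicated and the link needs to
* resynchronize.
*/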
static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
return lp->rcv_nxt + 1 == seqid;
}
static int process_rdx(struct ldc_channel *lp,
struct ldc_packet *p)
{
ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
p->stype, p->seqid, p->env, p->u.r.ackid);
if (p->stype != LDC_INFO ||
!(rx_seq_ok(lp, p->seqid)))
return LDC_ABORT(lp);
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
return LDC_EVENT_UP;
}
static int process_control_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
switch (p->ctrl) {
case LDC_VERS:
return process_version(lp, p);
case LDC_RTS:
return process_rts(lp, p);
case LDC_RTR:
return process_rtr(lp, p);
case LDC_RDX:
return process_rdx(lp, p);
default:
return LDC_ABORT(lp);
}
}
static int process_error_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
return LDC_ABORT(lp);
}
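/* Walk the TX ring starting at the oldest unACKed packet looking for
* the seqid the peer just acknowledged, then reclaim everything up to
* and including it by advancing tx_acked.
*/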
static int process_data_ack(struct ldc_channel *lp,
struct ldc_packet *ack)
{
unsigned long head = lp->tx_acked;
u32 ackid = ack->u.r.ackid;
while (1) {
struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
head = tx_advance(lp, head);
if (p->seqid == ackid) {
lp->tx_acked = head;
return 0;
}
if (head == lp->tx_tail)
return LDC_ABORT(lp);
}
return 0;
}
static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
if (event_mask & LDC_EVENT_RESET)
lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
if (event_mask & LDC_EVENT_UP)
lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
if (event_mask & LDC_EVENT_DATA_READY)
lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long orig_state, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
/* We should probably check for hypervisor errors here and
* reset the LDC channel if we get one.
*/
sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
event_mask = 0;
if (lp->cfg.mode == LDC_MODE_RAW &&
lp->chan_state == LDC_CHANNEL_UP) {
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
/*
* Generate an LDC_EVENT_UP event if the channel
* was not already up.
*/
if (orig_state != LDC_CHANNEL_UP) {
event_mask |= LDC_EVENT_UP;
orig_state = lp->chan_state;
}
}
/* If we are in reset state, flush the RX queue and ignore
* everything.
*/
if (lp->flags & LDC_FLAG_RESET) {
(void) ldc_rx_reset(lp);
goto out;
}
/* Once we finish the handshake, we let the ldc_read()
* paths do all of the control frame and state management.
* Just trigger the callback.
*/
if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
if (lp->chan_state != orig_state) {
unsigned int event = LDC_EVENT_RESET;
if (lp->chan_state == LDC_CHANNEL_UP)
event = LDC_EVENT_UP;
event_mask |= event;
}
if (lp->rx_head != lp->rx_tail)
event_mask |= LDC_EVENT_DATA_READY;
goto out;
}
if (lp->chan_state != orig_state)
goto out;
while (lp->rx_head != lp->rx_tail) {
struct ldc_packet *p;
unsigned long new;
int err;
p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
switch (p->type) {
case LDC_CTRL:
err = process_control_frame(lp, p);
if (err > 0)
event_mask |= err;
break;
case LDC_DATA:
event_mask |= LDC_EVENT_DATA_READY;
err = 0;
break;
case LDC_ERR:
err = process_error_frame(lp, p);
break;
default:
err = LDC_ABORT(lp);
break;
}
if (err < 0)
break;
new = lp->rx_head;
new += LDC_PACKET_SIZE;
if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
new = 0;
lp->rx_head = new;
err = __set_rx_head(lp, new);
if (err < 0) {
(void) LDC_ABORT(lp);
break;
}
if (lp->hs_state == LDC_HS_COMPLETE)
goto handshake_complete;
}
out:
spin_unlock_irqrestore(&lp->lock, flags);
send_events(lp, event_mask);
return IRQ_HANDLED;
}
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
unsigned long flags, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
/* We should probably check for hypervisor errors here and
* reset the LDC channel if we get one.
*/
sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
if (lp->cfg.mode == LDC_MODE_RAW &&
lp->chan_state == LDC_CHANNEL_UP) {
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
/*
* Generate an LDC_EVENT_UP event if the channel
* was not already up.
*/
if (orig_state != LDC_CHANNEL_UP) {
event_mask |= LDC_EVENT_UP;
orig_state = lp->chan_state;
}
}
spin_unlock_irqrestore(&lp->lock, flags);
send_events(lp, event_mask);
return IRQ_HANDLED;
}
/* XXX ldc_alloc() and ldc_free() need to run under a mutex so
* XXX that addition and removal from the ldc_channel_list has
* XXX atomicity, otherwise the __ldc_channel_exists() check is
* XXX totally pointless as another thread can slip into ldc_alloc()
* XXX and add a channel with the same ID. There also needs to be
* XXX a spinlock for ldc_channel_list.
*/
static HLIST_HEAD(ldc_channel_list);
static int __ldc_channel_exists(unsigned long id)
{
struct ldc_channel *lp;
hlist_for_each_entry(lp, &ldc_channel_list, list) {
if (lp->id == id)
return 1;
}
return 0;
}
static int alloc_queue(const char *name, unsigned long num_entries,
struct ldc_packet **base, unsigned long *ra)
{
unsigned long size, order;
void *q;
size = num_entries * LDC_PACKET_SIZE;
order = get_order(size);
q = (void *) __get_free_pages(GFP_KERNEL, order);
if (!q) {
printk(KERN_ERR PFX "Alloc of %s queue failed with "
"size=%lu order=%lu\n", name, size, order);
return -ENOMEM;
}
memset(q, 0, PAGE_SIZE << order);
*base = q;
*ra = __pa(q);
return 0;
}
static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
unsigned long size, order;
if (!q)
return;
size = num_entries * LDC_PACKET_SIZE;
order = get_order(size);
free_pages((unsigned long)q, order);
}
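/* Transfer cookies carry the page-size code in their top four bits
* (COOKIE_PGSZ_CODE) and the map-table index in the low bits, scaled
* by the page size: index = cookie >> (13 + 3 * szcode), i.e. 8K
* pages shift by 13, 64K pages by 16, and so on.
*/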
static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
{
u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
/* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
cookie &= ~COOKIE_PGSZ_CODE;
return (cookie >> (13ULL + (szcode * 3ULL)));
}
static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
unsigned long entry, unsigned long npages)
{
struct ldc_mtable_entry *base;
unsigned long i, shift;
shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
base = iommu->page_table + entry;
for (i = 0; i < npages; i++) {
if (base->cookie)
sun4v_ldc_revoke(id, cookie + (i << shift),
base->cookie);
base->mte = 0;
}
}
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE (8 * 1024)
static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
{
unsigned long sz, num_tsb_entries, tsbsize, order;
struct ldc_iommu *ldc_iommu = &lp->iommu;
struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
struct ldc_mtable_entry *table;
unsigned long hv_err;
int err;
num_tsb_entries = LDC_IOTABLE_SIZE;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
spin_lock_init(&ldc_iommu->lock);
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->map = kzalloc(sz, GFP_KERNEL);
if (!iommu->map) {
printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
return -ENOMEM;
}
iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
NULL, false /* no large pool */,
1 /* npools */,
true /* skip span boundary check */);
order = get_order(tsbsize);
table = (struct ldc_mtable_entry *)
__get_free_pages(GFP_KERNEL, order);
err = -ENOMEM;
if (!table) {
printk(KERN_ERR PFX "Alloc of MTE table failed, "
"size=%lu order=%lu\n", tsbsize, order);
goto out_free_map;
}
memset(table, 0, PAGE_SIZE << order);
ldc_iommu->page_table = table;
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
num_tsb_entries);
err = -EINVAL;
if (hv_err)
goto out_free_table;
return 0;
out_free_table:
free_pages((unsigned long) table, order);
ldc_iommu->page_table = NULL;
out_free_map:
kfree(iommu->map);
iommu->map = NULL;
return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
struct ldc_iommu *ldc_iommu = &lp->iommu;
struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
unsigned long num_tsb_entries, tsbsize, order;
(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
num_tsb_entries = iommu->poolsize * iommu->nr_pools;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
order = get_order(tsbsize);
free_pages((unsigned long) ldc_iommu->page_table, order);
ldc_iommu->page_table = NULL;
kfree(iommu->map);
iommu->map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
const struct ldc_channel_config *cfgp,
void *event_arg,
const char *name)
{
struct ldc_channel *lp;
const struct ldc_mode_ops *mops;
unsigned long dummy1, dummy2, hv_err;
u8 mss, *mssbuf;
int err;
err = -ENODEV;
if (!ldom_domaining_enabled)
goto out_err;
err = -EINVAL;
if (!cfgp)
goto out_err;
if (!name)
goto out_err;
switch (cfgp->mode) {
case LDC_MODE_RAW:
mops = &raw_ops;
mss = LDC_PACKET_SIZE;
break;
case LDC_MODE_UNRELIABLE:
mops = &nonraw_ops;
mss = LDC_PACKET_SIZE - 8;
break;
case LDC_MODE_STREAM:
mops = &stream_ops;
mss = LDC_PACKET_SIZE - 8 - 8;
break;
default:
goto out_err;
}
if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
goto out_err;
hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
err = -ENODEV;
if (hv_err == HV_ECHANNEL)
goto out_err;
err = -EEXIST;
if (__ldc_channel_exists(id))
goto out_err;
mssbuf = NULL;
lp = kzalloc(sizeof(*lp), GFP_KERNEL);
err = -ENOMEM;
if (!lp)
goto out_err;
spin_lock_init(&lp->lock);
lp->id = id;
err = ldc_iommu_init(name, lp);
if (err)
goto out_free_ldc;
lp->mops = mops;
lp->mss = mss;
lp->cfg = *cfgp;
if (!lp->cfg.mtu)
lp->cfg.mtu = LDC_DEFAULT_MTU;
if (lp->cfg.mode == LDC_MODE_STREAM) {
mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
if (!mssbuf) {
err = -ENOMEM;
goto out_free_iommu;
}
lp->mssbuf = mssbuf;
}
lp->event_arg = event_arg;
/* XXX allow setting via ldc_channel_config to override defaults
* XXX or use some formula based upon mtu
*/
lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
err = alloc_queue("TX", lp->tx_num_entries,
&lp->tx_base, &lp->tx_ra);
if (err)
goto out_free_mssbuf;
err = alloc_queue("RX", lp->rx_num_entries,
&lp->rx_base, &lp->rx_ra);
if (err)
goto out_free_txq;
lp->flags |= LDC_FLAG_ALLOCED_QUEUES;
lp->hs_state = LDC_HS_CLOSED;
ldc_set_state(lp, LDC_STATE_INIT);
INIT_HLIST_NODE(&lp->list);
hlist_add_head(&lp->list, &ldc_channel_list);
INIT_HLIST_HEAD(&lp->mh_list);
snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
lp->rx_irq_name, lp);
if (err)
goto out_free_txq;
err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
goto out_free_txq;
}
return lp;
out_free_txq:
free_queue(lp->tx_num_entries, lp->tx_base);
out_free_mssbuf:
kfree(mssbuf);
out_free_iommu:
ldc_iommu_release(lp);
out_free_ldc:
kfree(lp);
out_err:
return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);
void ldc_unbind(struct ldc_channel *lp)
{
if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
free_irq(lp->cfg.rx_irq, lp);
free_irq(lp->cfg.tx_irq, lp);
lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
}
if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
sun4v_ldc_tx_qconf(lp->id, 0, 0);
sun4v_ldc_rx_qconf(lp->id, 0, 0);
lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
}
if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
free_queue(lp->tx_num_entries, lp->tx_base);
free_queue(lp->rx_num_entries, lp->rx_base);
lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
}
ldc_set_state(lp, LDC_STATE_INIT);
}
EXPORT_SYMBOL(ldc_unbind);
void ldc_free(struct ldc_channel *lp)
{
ldc_unbind(lp);
hlist_del(&lp->list);
kfree(lp->mssbuf);
ldc_iommu_release(lp);
kfree(lp);
}
EXPORT_SYMBOL(ldc_free);
/* Bind the channel. This registers the LDC queues with
* the hypervisor and puts the channel into a pseudo-listening
* state. This does not initiate a handshake; ldc_connect() does
* that.
*/
int ldc_bind(struct ldc_channel *lp)
{
unsigned long hv_err, flags;
int err = -EINVAL;
if (lp->state != LDC_STATE_INIT)
return -EINVAL;
spin_lock_irqsave(&lp->lock, flags);
enable_irq(lp->cfg.rx_irq);
enable_irq(lp->cfg.tx_irq);
lp->flags |= LDC_FLAG_REGISTERED_IRQS;
err = -ENODEV;
hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_free_irqs;
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
goto out_free_irqs;
hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_unmap_tx;
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
goto out_unmap_tx;
lp->flags |= LDC_FLAG_REGISTERED_QUEUES;
hv_err = sun4v_ldc_tx_get_state(lp->id,
&lp->tx_head,
&lp->tx_tail,
&lp->chan_state);
err = -EBUSY;
if (hv_err)
goto out_unmap_rx;
lp->tx_acked = lp->tx_head;
lp->hs_state = LDC_HS_OPEN;
ldc_set_state(lp, LDC_STATE_BOUND);
if (lp->cfg.mode == LDC_MODE_RAW) {
/*
* There is no handshake in RAW mode, so the handshake
* is considered complete.
*/
lp->hs_state = LDC_HS_COMPLETE;
}
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
out_unmap_rx:
lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
sun4v_ldc_rx_qconf(lp->id, 0, 0);
out_unmap_tx:
sun4v_ldc_tx_qconf(lp->id, 0, 0);
out_free_irqs:
lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
free_irq(lp->cfg.tx_irq, lp);
free_irq(lp->cfg.rx_irq, lp);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_bind);
int ldc_connect(struct ldc_channel *lp)
{
unsigned long flags;
int err;
if (lp->cfg.mode == LDC_MODE_RAW)
return -EINVAL;
spin_lock_irqsave(&lp->lock, flags);
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
lp->hs_state != LDC_HS_OPEN)
err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
else
err = start_handshake(lp);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_connect);
int ldc_disconnect(struct ldc_channel *lp)
{
unsigned long hv_err, flags;
int err;
if (lp->cfg.mode == LDC_MODE_RAW)
return -EINVAL;
if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
!(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
return -EINVAL;
spin_lock_irqsave(&lp->lock, flags);
err = -ENODEV;
hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
if (hv_err)
goto out_err;
hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
if (hv_err)
goto out_err;
ldc_set_state(lp, LDC_STATE_BOUND);
lp->hs_state = LDC_HS_OPEN;
lp->flags |= LDC_FLAG_RESET;
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
out_err:
sun4v_ldc_tx_qconf(lp->id, 0, 0);
sun4v_ldc_rx_qconf(lp->id, 0, 0);
free_irq(lp->cfg.tx_irq, lp);
free_irq(lp->cfg.rx_irq, lp);
lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
LDC_FLAG_REGISTERED_QUEUES);
ldc_set_state(lp, LDC_STATE_INIT);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_disconnect);
int ldc_state(struct ldc_channel *lp)
{
return lp->state;
}
EXPORT_SYMBOL(ldc_state);
void ldc_set_state(struct ldc_channel *lp, u8 state)
{
ldcdbg(STATE, "STATE (%s) --> (%s)\n",
state_to_str(lp->state),
state_to_str(state));
lp->state = state;
}
EXPORT_SYMBOL(ldc_set_state);
int ldc_mode(struct ldc_channel *lp)
{
return lp->cfg.mode;
}
EXPORT_SYMBOL(ldc_mode);
int ldc_rx_reset(struct ldc_channel *lp)
{
return __set_rx_head(lp, lp->rx_tail);
}
EXPORT_SYMBOL(ldc_rx_reset);
void __ldc_print(struct ldc_channel *lp, const char *caller)
{
pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
"\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
"\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
"\trcv_nxt=%u snd_nxt=%u\n",
caller, lp->id, lp->flags, state_to_str(lp->state),
lp->chan_state, lp->hs_state,
lp->rx_head, lp->rx_tail, lp->rx_num_entries,
lp->tx_head, lp->tx_tail, lp->tx_num_entries,
lp->rcv_nxt, lp->snd_nxt);
}
EXPORT_SYMBOL(__ldc_print);
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
struct ldc_packet *p;
unsigned long new_tail, hv_err;
int err;
hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
&lp->chan_state);
if (unlikely(hv_err))
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
return LDC_ABORT(lp);
if (size > LDC_PACKET_SIZE)
return -EMSGSIZE;
p = data_get_tx_packet(lp, &new_tail);
if (!p)
return -EAGAIN;
memcpy(p, buf, size);
err = send_tx_packet(lp, p, new_tail);
if (!err)
err = size;
return err;
}
static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
struct ldc_packet *p;
unsigned long hv_err, new;
int err;
if (size < LDC_PACKET_SIZE)
return -EINVAL;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (lp->rx_head == lp->rx_tail)
return 0;
p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
memcpy(buf, p, LDC_PACKET_SIZE);
new = rx_advance(lp, lp->rx_head);
lp->rx_head = new;
err = __set_rx_head(lp, new);
if (err < 0)
err = -ECONNRESET;
else
err = LDC_PACKET_SIZE;
return err;
}
static const struct ldc_mode_ops raw_ops = {
.write = write_raw,
.read = read_raw,
};
static int write_nonraw(struct ldc_channel *lp, const void *buf,
unsigned int size)
{
unsigned long hv_err, tail;
unsigned int copied;
u32 seq;
int err;
hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
&lp->chan_state);
if (unlikely(hv_err))
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
return LDC_ABORT(lp);
if (!tx_has_space_for(lp, size))
return -EAGAIN;
seq = lp->snd_nxt;
copied = 0;
tail = lp->tx_tail;
while (copied < size) {
struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
p->u.u_data :
p->u.r.r_data);
int data_len;
p->type = LDC_DATA;
p->stype = LDC_INFO;
p->ctrl = 0;
data_len = size - copied;
if (data_len > lp->mss)
data_len = lp->mss;
BUG_ON(data_len > LDC_LEN);
p->env = (data_len |
(copied == 0 ? LDC_START : 0) |
(data_len == size - copied ? LDC_STOP : 0));
p->seqid = ++seq;
ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
p->type,
p->stype,
p->ctrl,
p->env,
p->seqid);
memcpy(data, buf, data_len);
buf += data_len;
copied += data_len;
tail = tx_advance(lp, tail);
}
err = set_tx_tail(lp, tail);
if (!err) {
lp->snd_nxt = seq;
err = size;
}
return err;
}
static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
struct ldc_packet *first_frag)
{
int err;
if (first_frag)
lp->rcv_nxt = first_frag->seqid - 1;
err = send_data_nack(lp, p);
if (err)
return err;
err = ldc_rx_reset(lp);
if (err < 0)
return LDC_ABORT(lp);
return 0;
}
static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
if (p->stype & LDC_ACK) {
int err = process_data_ack(lp, p);
if (err)
return err;
}
if (p->stype & LDC_NACK)
return LDC_ABORT(lp);
return 0;
}
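/* Poll the hypervisor RX queue state for up to ~1000 microseconds
* waiting for data to appear beyond cur_head; give up with -EAGAIN if
* nothing arrives.
*/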
static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
unsigned long dummy;
int limit = 1000;
ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
cur_head, lp->rx_head, lp->rx_tail);
while (limit-- > 0) {
unsigned long hv_err;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&dummy,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (cur_head != lp->rx_tail) {
ldcdbg(DATA, "DATA WAIT DONE "
"head[%lx] tail[%lx] chan_state[%lx]\n",
dummy, lp->rx_tail, lp->chan_state);
return 0;
}
udelay(1);
}
return -EAGAIN;
}
static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
int err = __set_rx_head(lp, head);
if (err < 0)
return LDC_ABORT(lp);
lp->rx_head = head;
return 0;
}
static void send_data_ack(struct ldc_channel *lp)
{
unsigned long new_tail;
struct ldc_packet *p;
p = data_get_tx_packet(lp, &new_tail);
if (likely(p)) {
int err;
memset(p, 0, sizeof(*p));
p->type = LDC_DATA;
p->stype = LDC_ACK;
p->ctrl = 0;
p->seqid = lp->snd_nxt + 1;
p->u.r.ackid = lp->rcv_nxt;
err = send_tx_packet(lp, p, new_tail);
if (!err)
lp->snd_nxt++;
}
}
static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
struct ldc_packet *first_frag;
unsigned long hv_err, new;
int err, copied;
hv_err = sun4v_ldc_rx_get_state(lp->id,
&lp->rx_head,
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
return -ECONNRESET;
if (lp->rx_head == lp->rx_tail)
return 0;
first_frag = NULL;
copied = err = 0;
new = lp->rx_head;
while (1) {
struct ldc_packet *p;
int pkt_len;
BUG_ON(new == lp->rx_tail);
p = lp->rx_base + (new / LDC_PACKET_SIZE);
ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
"rcv_nxt[%08x]\n",
p->type,
p->stype,
p->ctrl,
p->env,
p->seqid,
p->u.r.ackid,
lp->rcv_nxt);
if (unlikely(!rx_seq_ok(lp, p->seqid))) {
err = rx_bad_seq(lp, p, first_frag);
copied = 0;
break;
}
if (p->type & LDC_CTRL) {
err = process_control_frame(lp, p);
if (err < 0)
break;
err = 0;
}
lp->rcv_nxt = p->seqid;
/*
* If this is a control-only packet, there is nothing
* else to do but advance the rx queue since the packet
* was already processed above.
*/
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
if (err)
break;
}
if (!(p->stype & LDC_INFO)) {
new = rx_advance(lp, new);
err = rx_set_head(lp, new);
if (err)
break;
goto no_data;
}
pkt_len = p->env & LDC_LEN;
/* Every initial packet starts with the START bit set.
*
* Singleton packets will have both START+STOP set.
*
* Fragments will have START set in the first frame, STOP
* set in the last frame, and neither bit set in middle
* frames of the packet.
*
* Therefore if we are at the beginning of a packet and
* we don't see START, or we are in the middle of a fragmented
* packet and do see START, we are unsynchronized and should
* flush the RX queue.
*/
if ((first_frag == NULL && !(p->env & LDC_START)) ||
(first_frag != NULL && (p->env & LDC_START))) {
if (!first_frag)
new = rx_advance(lp, new);
err = rx_set_head(lp, new);
if (err)
break;
if (!first_frag)
goto no_data;
}
if (!first_frag)
first_frag = p;
if (pkt_len > size - copied) {
/* User didn't give us a big enough buffer,
* what to do? This is a pretty serious error.
*
* Since we haven't updated the RX ring head to
* consume any of the packets, signal the error
* to the user and just leave the RX ring alone.
*
* This seems the best behavior because this allows
* a user of the LDC layer to start with a small
* RX buffer for ldc_read() calls and use -EMSGSIZE
* as a cue to enlarge its read buffer.
*/
err = -EMSGSIZE;
break;
}
/* Ok, we are gonna eat this one. */
new = rx_advance(lp, new);
memcpy(buf,
(lp->cfg.mode == LDC_MODE_UNRELIABLE ?
p->u.u_data : p->u.r.r_data), pkt_len);
buf += pkt_len;
copied += pkt_len;
if (p->env & LDC_STOP)
break;
no_data:
if (new == lp->rx_tail) {
err = rx_data_wait(lp, new);
if (err)
break;
}
}
if (!err)
err = rx_set_head(lp, new);
if (err && first_frag)
lp->rcv_nxt = first_frag->seqid - 1;
if (!err) {
err = copied;
if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
send_data_ack(lp);
}
return err;
}
static const struct ldc_mode_ops nonraw_ops = {
.write = write_nonraw,
.read = read_nonraw,
};
static int write_stream(struct ldc_channel *lp, const void *buf,
unsigned int size)
{
if (size > lp->cfg.mtu)
size = lp->cfg.mtu;
return write_nonraw(lp, buf, size);
}
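/* Stream-mode reads are buffered: read_nonraw() reassembles a full
* message of up to cfg.mtu bytes into mssbuf, and caller reads are
* then satisfied from that buffer until it drains.
*/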
static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
if (!lp->mssbuf_len) {
int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
if (err < 0)
return err;
lp->mssbuf_len = err;
lp->mssbuf_off = 0;
}
if (size > lp->mssbuf_len)
size = lp->mssbuf_len;
memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);
lp->mssbuf_off += size;
lp->mssbuf_len -= size;
return size;
}
static const struct ldc_mode_ops stream_ops = {
.write = write_stream,
.read = read_stream,
};
int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
unsigned long flags;
int err;
if (!buf)
return -EINVAL;
if (!size)
return 0;
spin_lock_irqsave(&lp->lock, flags);
if (lp->hs_state != LDC_HS_COMPLETE)
err = -ENOTCONN;
else
err = lp->mops->write(lp, buf, size);
spin_unlock_irqrestore(&lp->lock, flags);
return err;
}
EXPORT_SYMBOL(ldc_write);
int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
unsigned long flags;
int err;
ldcdbg(RX, "%s: entered size=%d\n", __func__, size);
if (!buf)
return -EINVAL;
if (!size)
return 0;
spin_lock_irqsave(&lp->lock, flags);
if (lp->hs_state != LDC_HS_COMPLETE)
err = -ENOTCONN;
else
err = lp->mops->read(lp, buf, size);
spin_unlock_irqrestore(&lp->lock, flags);
ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
lp->cfg.mode, lp->rx_head, lp->rx_tail, err);
return err;
}
EXPORT_SYMBOL(ldc_read);
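/* Map the kernel PAGE_SIZE onto the page-size codes used in transfer
* cookies: 0 = 8K, 1 = 64K, 2 = 512K, 3 = 4M, 4 = 32M, 5 = 256M.
* Each step is a factor of eight, which is where the '* 3' shift in
* ldc_cookie_to_index() comes from.
*/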
static u64 pagesize_code(void)
{
switch (PAGE_SIZE) {
default:
case (8ULL * 1024ULL):
return 0;
case (64ULL * 1024ULL):
return 1;
case (512ULL * 1024ULL):
return 2;
case (4ULL * 1024ULL * 1024ULL):
return 3;
case (32ULL * 1024ULL * 1024ULL):
return 4;
case (256ULL * 1024ULL * 1024ULL):
return 5;
}
}
static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
(index << PAGE_SHIFT) |
page_offset);
}
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
unsigned long npages)
{
long entry;
entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
npages, NULL, (unsigned long)-1, 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
}
static u64 perm_to_mte(unsigned int map_perm)
{
u64 mte_base;
mte_base = pagesize_code();
if (map_perm & LDC_MAP_SHADOW) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_COPY_R;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_COPY_W;
}
if (map_perm & LDC_MAP_DIRECT) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_READ;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_WRITE;
if (map_perm & LDC_MAP_X)
mte_base |= LDC_MTE_EXEC;
}
if (map_perm & LDC_MAP_IO) {
if (map_perm & LDC_MAP_R)
mte_base |= LDC_MTE_IOMMU_R;
if (map_perm & LDC_MAP_W)
mte_base |= LDC_MTE_IOMMU_W;
}
return mte_base;
}
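/* Count how many pages the byte range [base, base + len) touches,
* including partial pages at either end.
*/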
static int pages_in_region(unsigned long base, long len)
{
int count = 0;
do {
unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;
len -= (new - base);
base = new;
count++;
} while (len > 0);
return count;
}
struct cookie_state {
struct ldc_mtable_entry *page_table;
struct ldc_trans_cookie *cookies;
u64 mte_base;
u64 prev_cookie;
u32 pte_idx;
u32 nc;
};
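/* Program one map-table entry per page and emit transfer cookies for
* the region.  When a new cookie begins exactly where the previous
* one ended, the two are coalesced by growing the previous cookie's
* size instead of consuming another cookies[] slot.
*/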
static void fill_cookies(struct cookie_state *sp, unsigned long pa,
unsigned long off, unsigned long len)
{
do {
unsigned long tlen, new = pa + PAGE_SIZE;
u64 this_cookie;
sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;
tlen = PAGE_SIZE;
if (off)
tlen = PAGE_SIZE - off;
if (tlen > len)
tlen = len;
this_cookie = make_cookie(sp->pte_idx,
pagesize_code(), off);
off = 0;
if (this_cookie == sp->prev_cookie) {
sp->cookies[sp->nc - 1].cookie_size += tlen;
} else {
sp->cookies[sp->nc].cookie_addr = this_cookie;
sp->cookies[sp->nc].cookie_size = tlen;
sp->nc++;
}
sp->prev_cookie = this_cookie + tlen;
sp->pte_idx++;
len -= tlen;
pa = new;
} while (len > 0);
}
static int sg_count_one(struct scatterlist *sg)
{
unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
long len = sg->length;
if ((sg->offset | len) & (8UL - 1))
return -EFAULT;
return pages_in_region(base + sg->offset, len);
}
static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
int count;
int i;
count = 0;
for (i = 0; i < num_sg; i++) {
int err = sg_count_one(sg + i);
if (err < 0)
return err;
count += err;
}
return count;
}
int ldc_map_sg(struct ldc_channel *lp,
struct scatterlist *sg, int num_sg,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
unsigned long i, npages;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
int err;
struct scatterlist *s;
if (map_perm & ~LDC_MAP_ALL)
return -EINVAL;
err = sg_count_pages(sg, num_sg);
if (err < 0)
return err;
npages = err;
if (err > ncookies)
return -EMSGSIZE;
iommu = &lp->iommu;
base = alloc_npages(iommu, npages);
if (!base)
return -ENOMEM;
state.page_table = iommu->page_table;
state.cookies = cookies;
state.mte_base = perm_to_mte(map_perm);
state.prev_cookie = ~(u64)0;
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
for_each_sg(sg, s, num_sg, i) {
fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT,
s->offset, s->length);
}
return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
int ldc_map_single(struct ldc_channel *lp,
void *buf, unsigned int len,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
unsigned long npages, pa;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
return -EINVAL;
pa = __pa(buf);
if ((pa | len) & (8UL - 1))
return -EFAULT;
npages = pages_in_region(pa, len);
iommu = &lp->iommu;
base = alloc_npages(iommu, npages);
if (!base)
return -ENOMEM;
state.page_table = iommu->page_table;
state.cookies = cookies;
state.mte_base = perm_to_mte(map_perm);
state.prev_cookie = ~(u64)0;
state.pte_idx = (base - iommu->page_table);
state.nc = 0;
fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
BUG_ON(state.nc > ncookies);
return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
u64 cookie, u64 size)
{
unsigned long npages, entry;
npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
entry = ldc_cookie_to_index(cookie, iommu);
ldc_demap(iommu, id, cookie, entry, npages);
iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
int ncookies)
{
struct ldc_iommu *iommu = &lp->iommu;
int i;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
for (i = 0; i < ncookies; i++) {
u64 addr = cookies[i].cookie_addr;
u64 size = cookies[i].cookie_size;
free_npages(lp->id, iommu, addr, size);
}
spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);
int ldc_copy(struct ldc_channel *lp, int copy_dir,
void *buf, unsigned int len, unsigned long offset,
struct ldc_trans_cookie *cookies, int ncookies)
{
unsigned int orig_len;
unsigned long ra;
int i;
if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
lp->id, copy_dir);
return -EINVAL;
}
ra = __pa(buf);
if ((ra | len | offset) & (8UL - 1)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
"ra[%lx] len[%x] offset[%lx]\n",
lp->id, ra, len, offset);
return -EFAULT;
}
if (lp->hs_state != LDC_HS_COMPLETE ||
(lp->flags & LDC_FLAG_RESET)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
"flags[%x]\n", lp->id, lp->hs_state, lp->flags);
return -ECONNRESET;
}
orig_len = len;
for (i = 0; i < ncookies; i++) {
unsigned long cookie_raddr = cookies[i].cookie_addr;
unsigned long this_len = cookies[i].cookie_size;
unsigned long actual_len;
if (unlikely(offset)) {
unsigned long this_off = offset;
if (this_off > this_len)
this_off = this_len;
offset -= this_off;
this_len -= this_off;
if (!this_len)
continue;
cookie_raddr += this_off;
}
if (this_len > len)
this_len = len;
while (1) {
unsigned long hv_err;
hv_err = sun4v_ldc_copy(lp->id, copy_dir,
cookie_raddr, ra,
this_len, &actual_len);
if (unlikely(hv_err)) {
printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
"HV error %lu\n",
lp->id, hv_err);
if (lp->hs_state != LDC_HS_COMPLETE ||
(lp->flags & LDC_FLAG_RESET))
return -ECONNRESET;
else
return -EFAULT;
}
cookie_raddr += actual_len;
ra += actual_len;
len -= actual_len;
if (actual_len == this_len)
break;
this_len -= actual_len;
}
if (!len)
break;
}
/* It is the caller's policy what to do about short copies.
* For example, a networking driver can declare the
* packet a runt and drop it.
*/
return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
struct ldc_trans_cookie *cookies, int *ncookies,
unsigned int map_perm)
{
void *buf;
int err;
if (len & (8UL - 1))
return ERR_PTR(-EINVAL);
buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
return ERR_PTR(-ENOMEM);
err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
if (err < 0) {
kfree(buf);
return ERR_PTR(err);
}
*ncookies = err;
return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);
void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
struct ldc_trans_cookie *cookies, int ncookies)
{
ldc_unmap(lp, cookies, ncookies);
kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
static int __init ldc_init(void)
{
unsigned long major, minor;
struct mdesc_handle *hp;
const u64 *v;
int err;
u64 mp;
hp = mdesc_grab();
if (!hp)
return -ENODEV;
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
err = -ENODEV;
if (mp == MDESC_NODE_NULL)
goto out;
v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
if (!v)
goto out;
major = 1;
minor = 0;
if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
goto out;
}
printk(KERN_INFO "%s", version);
if (!*v) {
printk(KERN_INFO PFX "Domaining disabled.\n");
goto out;
}
ldom_domaining_enabled = 1;
err = 0;
out:
mdesc_release(hp);
return err;
}
core_initcall(ldc_init);
| linux-master | arch/sparc/kernel/ldc.c |
// SPDX-License-Identifier: GPL-2.0
/* viohs.c: LDOM Virtual I/O handshake helper layer.
*
* Copyright (C) 2007 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <asm/ldc.h>
#include <asm/vio.h>
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
int err, limit = 1000;
err = -EINVAL;
while (limit-- > 0) {
err = ldc_write(vio->lp, data, len);
if (!err || (err != -EAGAIN))
break;
udelay(1);
}
return err;
}
EXPORT_SYMBOL(vio_ldc_send);
static int send_ctrl(struct vio_driver_state *vio,
struct vio_msg_tag *tag, int len)
{
tag->sid = vio_send_sid(vio);
return vio_ldc_send(vio, tag, len);
}
static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
tag->type = type;
tag->stype = stype;
tag->stype_env = stype_env;
}
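/* Each new version exchange derives a fresh local session ID from the
* scheduler clock, presumably so the peer can tell a new session
* apart from a stale one.
*/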
static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
struct vio_ver_info pkt;
vio->_local_sid = (u32) sched_clock();
memset(&pkt, 0, sizeof(pkt));
init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
pkt.major = major;
pkt.minor = minor;
pkt.dev_class = vio->dev_class;
viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
major, minor, vio->dev_class);
return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}
static int start_handshake(struct vio_driver_state *vio)
{
int err;
viodbg(HS, "START HANDSHAKE\n");
vio->hs_state = VIO_HS_INVALID;
err = send_version(vio,
vio->ver_table[0].major,
vio->ver_table[0].minor);
if (err < 0)
return err;
return 0;
}
static void flush_rx_dring(struct vio_driver_state *vio)
{
struct vio_dring_state *dr;
u64 ident;
BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));
dr = &vio->drings[VIO_DRIVER_RX_RING];
ident = dr->ident;
BUG_ON(!vio->desc_buf);
kfree(vio->desc_buf);
vio->desc_buf = NULL;
memset(dr, 0, sizeof(*dr));
dr->ident = ident;
}
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
if (event == LDC_EVENT_UP) {
vio->hs_state = VIO_HS_INVALID;
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
vio->dr_state = (VIO_DR_STATE_TXREQ |
VIO_DR_STATE_RXREQ);
break;
case VDEV_DISK:
vio->dr_state = VIO_DR_STATE_TXREQ;
break;
case VDEV_DISK_SERVER:
vio->dr_state = VIO_DR_STATE_RXREQ;
break;
}
start_handshake(vio);
} else if (event == LDC_EVENT_RESET) {
vio->hs_state = VIO_HS_INVALID;
if (vio->dr_state & VIO_DR_STATE_RXREG)
flush_rx_dring(vio);
vio->dr_state = 0x00;
memset(&vio->ver, 0, sizeof(vio->ver));
ldc_disconnect(vio->lp);
}
}
EXPORT_SYMBOL(vio_link_state_change);
static int handshake_failure(struct vio_driver_state *vio)
{
struct vio_dring_state *dr;
/* XXX Put policy here... Perhaps start a timer to fire
* XXX in 100 ms, which will bring the link up and retry
* XXX the handshake.
*/
viodbg(HS, "HANDSHAKE FAILURE\n");
vio->dr_state &= ~(VIO_DR_STATE_TXREG |
VIO_DR_STATE_RXREG);
dr = &vio->drings[VIO_DRIVER_RX_RING];
memset(dr, 0, sizeof(*dr));
kfree(vio->desc_buf);
vio->desc_buf = NULL;
vio->desc_buf_len = 0;
vio->hs_state = VIO_HS_INVALID;
return -ECONNRESET;
}
static int process_unknown(struct vio_driver_state *vio, void *arg)
{
struct vio_msg_tag *pkt = arg;
viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
vio->vdev->channel_id);
ldc_disconnect(vio->lp);
return -ECONNRESET;
}
static int send_dreg(struct vio_driver_state *vio)
{
struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
union {
struct vio_dring_register pkt;
char all[sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
VIO_MAX_RING_COOKIES)];
} u;
size_t bytes = sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
dr->ncookies);
int i;
if (WARN_ON(bytes > sizeof(u)))
return -EINVAL;
memset(&u, 0, bytes);
init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
u.pkt.dring_ident = 0;
u.pkt.num_descr = dr->num_entries;
u.pkt.descr_size = dr->entry_size;
u.pkt.options = VIO_TX_DRING;
u.pkt.num_cookies = dr->ncookies;
viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
"ncookies[%u]\n",
u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
u.pkt.num_cookies);
for (i = 0; i < dr->ncookies; i++) {
u.pkt.cookies[i] = dr->cookies[i];
viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
i,
(unsigned long long) u.pkt.cookies[i].cookie_addr,
(unsigned long long) u.pkt.cookies[i].cookie_size);
}
return send_ctrl(vio, &u.pkt.tag, bytes);
}
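/* Sketch (added for illustration; hypothetical names): the on-stack
 * union in send_dreg() is the usual idiom for a message with a bounded
 * flexible tail -- reserve the compile-time maximum, transmit only what
 * is used:
 *
 *	union {
 *		struct msg hdr;		(ends in a cookies[] flex array)
 *		char all[sizeof(struct msg) +
 *			 MAX_COOKIES * sizeof(struct ldc_trans_cookie)];
 *	} u;
 *	bytes = sizeof(struct msg) + ncookies * sizeof(u.hdr.cookies[0]);
 *
 * Only 'bytes' of the reserved maximum is zeroed and handed to
 * send_ctrl().
 */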
static int send_rdx(struct vio_driver_state *vio)
{
struct vio_rdx pkt;
memset(&pkt, 0, sizeof(pkt));
init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);
viodbg(HS, "SEND RDX INFO\n");
return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}
static int send_attr(struct vio_driver_state *vio)
{
if (!vio->ops)
return -EINVAL;
return vio->ops->send_attr(vio);
}
static struct vio_version *find_by_major(struct vio_driver_state *vio,
u16 major)
{
struct vio_version *ret = NULL;
int i;
for (i = 0; i < vio->ver_table_entries; i++) {
struct vio_version *v = &vio->ver_table[i];
if (v->major <= major) {
ret = v;
break;
}
}
return ret;
}
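/* Worked example (added): find_by_major() assumes ver_table is sorted
 * by descending major number, as the vio drivers arrange it.  With a
 * table of { {2,0}, {1,6}, {1,0} }, a peer offer of major=1 selects
 * {1,6}, and process_ver_info() below then clamps the minor to
 * min(peer minor, 6).  An offer of major=3 selects {2,0}; the major
 * mismatch produces a NACK carrying 2.0 as the counter-proposal.
 */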
static int process_ver_info(struct vio_driver_state *vio,
struct vio_ver_info *pkt)
{
struct vio_version *vap;
int err;
viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
pkt->major, pkt->minor, pkt->dev_class);
if (vio->hs_state != VIO_HS_INVALID) {
/* XXX Perhaps invoke start_handshake? XXX */
memset(&vio->ver, 0, sizeof(vio->ver));
vio->hs_state = VIO_HS_INVALID;
}
vap = find_by_major(vio, pkt->major);
vio->_peer_sid = pkt->tag.sid;
if (!vap) {
pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt->major = 0;
pkt->minor = 0;
viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
} else if (vap->major != pkt->major) {
pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt->major = vap->major;
pkt->minor = vap->minor;
viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
} else {
struct vio_version ver = {
.major = pkt->major,
.minor = pkt->minor,
};
if (ver.minor > vap->minor)
ver.minor = vap->minor;
pkt->minor = ver.minor;
pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt->dev_class = vio->dev_class;
viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
if (err > 0) {
vio->ver = ver;
vio->hs_state = VIO_HS_GOTVERS;
}
}
if (err < 0)
return handshake_failure(vio);
return 0;
}
static int process_ver_ack(struct vio_driver_state *vio,
struct vio_ver_info *pkt)
{
viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
pkt->major, pkt->minor, pkt->dev_class);
if (vio->hs_state & VIO_HS_GOTVERS) {
if (vio->ver.major != pkt->major ||
vio->ver.minor != pkt->minor) {
pkt->tag.stype = VIO_SUBTYPE_NACK;
(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
return handshake_failure(vio);
}
} else {
vio->ver.major = pkt->major;
vio->ver.minor = pkt->minor;
vio->hs_state = VIO_HS_GOTVERS;
}
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_DISK:
if (send_attr(vio) < 0)
return handshake_failure(vio);
break;
default:
break;
}
return 0;
}
static int process_ver_nack(struct vio_driver_state *vio,
struct vio_ver_info *pkt)
{
struct vio_version *nver;
viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
pkt->major, pkt->minor, pkt->dev_class);
if (pkt->major == 0 && pkt->minor == 0)
return handshake_failure(vio);
nver = find_by_major(vio, pkt->major);
if (!nver)
return handshake_failure(vio);
if (send_version(vio, nver->major, nver->minor) < 0)
return handshake_failure(vio);
return 0;
}
static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
switch (pkt->tag.stype) {
case VIO_SUBTYPE_INFO:
return process_ver_info(vio, pkt);
case VIO_SUBTYPE_ACK:
return process_ver_ack(vio, pkt);
case VIO_SUBTYPE_NACK:
return process_ver_nack(vio, pkt);
default:
return handshake_failure(vio);
}
}
static int process_attr(struct vio_driver_state *vio, void *pkt)
{
int err;
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
if (!vio->ops)
return 0;
err = vio->ops->handle_attr(vio, pkt);
if (err < 0) {
return handshake_failure(vio);
} else {
vio->hs_state |= VIO_HS_GOT_ATTR;
if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
!(vio->hs_state & VIO_HS_SENT_DREG)) {
if (send_dreg(vio) < 0)
return handshake_failure(vio);
vio->hs_state |= VIO_HS_SENT_DREG;
}
}
return 0;
}
static int all_drings_registered(struct vio_driver_state *vio)
{
int need_rx, need_tx;
need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);
if (need_rx &&
!(vio->dr_state & VIO_DR_STATE_RXREG))
return 0;
if (need_tx &&
!(vio->dr_state & VIO_DR_STATE_TXREG))
return 0;
return 1;
}
static int process_dreg_info(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
struct vio_dring_state *dr;
int i;
viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
(unsigned long long) pkt->dring_ident,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
goto send_nack;
if (vio->dr_state & VIO_DR_STATE_RXREG)
goto send_nack;
/* v1.6 and higher, ACK with desired, supported mode, or NACK */
if (vio_version_after_eq(vio, 1, 6)) {
if (!(pkt->options & VIO_TX_DRING))
goto send_nack;
pkt->options = VIO_TX_DRING;
}
BUG_ON(vio->desc_buf);
vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
if (!vio->desc_buf)
goto send_nack;
vio->desc_buf_len = pkt->descr_size;
dr = &vio->drings[VIO_DRIVER_RX_RING];
dr->num_entries = pkt->num_descr;
dr->entry_size = pkt->descr_size;
dr->ncookies = pkt->num_cookies;
for (i = 0; i < dr->ncookies; i++) {
dr->cookies[i] = pkt->cookies[i];
viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
i,
(unsigned long long)
pkt->cookies[i].cookie_addr,
(unsigned long long)
pkt->cookies[i].cookie_size);
}
pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt->dring_ident = ++dr->ident;
viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
(unsigned long long) pkt->dring_ident,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0)
goto send_nack;
vio->dr_state |= VIO_DR_STATE_RXREG;
return 0;
send_nack:
pkt->tag.stype = VIO_SUBTYPE_NACK;
viodbg(HS, "SEND DRING_REG NACK\n");
(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
return handshake_failure(vio);
}
static int process_dreg_ack(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
struct vio_dring_state *dr;
viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
(unsigned long long) pkt->dring_ident,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
dr = &vio->drings[VIO_DRIVER_TX_RING];
if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
return handshake_failure(vio);
dr->ident = pkt->dring_ident;
vio->dr_state |= VIO_DR_STATE_TXREG;
if (all_drings_registered(vio)) {
if (send_rdx(vio) < 0)
return handshake_failure(vio);
vio->hs_state = VIO_HS_SENT_RDX;
}
return 0;
}
static int process_dreg_nack(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
(unsigned long long) pkt->dring_ident,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
return handshake_failure(vio);
}
static int process_dreg(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
switch (pkt->tag.stype) {
case VIO_SUBTYPE_INFO:
return process_dreg_info(vio, pkt);
case VIO_SUBTYPE_ACK:
return process_dreg_ack(vio, pkt);
case VIO_SUBTYPE_NACK:
return process_dreg_nack(vio, pkt);
default:
return handshake_failure(vio);
}
}
static int process_dunreg(struct vio_driver_state *vio,
struct vio_dring_unregister *pkt)
{
struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];
viodbg(HS, "GOT DRING_UNREG\n");
if (pkt->dring_ident != dr->ident)
return 0;
vio->dr_state &= ~VIO_DR_STATE_RXREG;
memset(dr, 0, sizeof(*dr));
kfree(vio->desc_buf);
vio->desc_buf = NULL;
vio->desc_buf_len = 0;
return 0;
}
static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
viodbg(HS, "GOT RDX INFO\n");
pkt->tag.stype = VIO_SUBTYPE_ACK;
viodbg(HS, "SEND RDX ACK\n");
if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
return handshake_failure(vio);
vio->hs_state |= VIO_HS_SENT_RDX_ACK;
return 0;
}
static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
viodbg(HS, "GOT RDX ACK\n");
if (!(vio->hs_state & VIO_HS_SENT_RDX))
return handshake_failure(vio);
vio->hs_state |= VIO_HS_GOT_RDX_ACK;
return 0;
}
static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
viodbg(HS, "GOT RDX NACK\n");
return handshake_failure(vio);
}
static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
if (!all_drings_registered(vio))
return handshake_failure(vio);
switch (pkt->tag.stype) {
case VIO_SUBTYPE_INFO:
return process_rdx_info(vio, pkt);
case VIO_SUBTYPE_ACK:
return process_rdx_ack(vio, pkt);
case VIO_SUBTYPE_NACK:
return process_rdx_nack(vio, pkt);
default:
return handshake_failure(vio);
}
}
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
struct vio_msg_tag *tag = pkt;
u8 prev_state = vio->hs_state;
int err;
switch (tag->stype_env) {
case VIO_VER_INFO:
err = process_ver(vio, pkt);
break;
case VIO_ATTR_INFO:
err = process_attr(vio, pkt);
break;
case VIO_DRING_REG:
err = process_dreg(vio, pkt);
break;
case VIO_DRING_UNREG:
err = process_dunreg(vio, pkt);
break;
case VIO_RDX:
err = process_rdx(vio, pkt);
break;
default:
err = process_unknown(vio, pkt);
break;
}
if (!err &&
vio->hs_state != prev_state &&
(vio->hs_state & VIO_HS_COMPLETE)) {
if (vio->ops)
vio->ops->handshake_complete(vio);
}
return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);
void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);
/* The issue is that the Solaris virtual disk server just mirrors the
* SID values it gets from the client peer. So we work around that
* here in vio_{validate,send}_sid() so that the drivers don't need
* to be aware of this crap.
*/
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
u32 sid;
/* Always let VERSION+INFO packets through unchecked, they
* define the new SID.
*/
if (tp->type == VIO_TYPE_CTRL &&
tp->stype == VIO_SUBTYPE_INFO &&
tp->stype_env == VIO_VER_INFO)
return 0;
/* Ok, now figure out which SID to use. */
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
case VDEV_DISK_SERVER:
default:
sid = vio->_peer_sid;
break;
case VDEV_DISK:
sid = vio->_local_sid;
break;
}
if (sid == tp->sid)
return 0;
viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
tp->sid, vio->_peer_sid, vio->_local_sid);
return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);
u32 vio_send_sid(struct vio_driver_state *vio)
{
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
case VDEV_DISK:
default:
return vio->_local_sid;
case VDEV_DISK_SERVER:
return vio->_peer_sid;
}
}
EXPORT_SYMBOL(vio_send_sid);
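/* Summary (added for annotation) of the SID selection above:
 *
 *	dev_class		validate against	send
 *	VDEV_DISK		local SID		local SID
 *	VDEV_DISK_SERVER	peer SID		peer SID
 *	network/switch/other	peer SID		local SID
 */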
int vio_ldc_alloc(struct vio_driver_state *vio,
struct ldc_channel_config *base_cfg,
void *event_arg)
{
struct ldc_channel_config cfg = *base_cfg;
struct ldc_channel *lp;
cfg.tx_irq = vio->vdev->tx_irq;
cfg.rx_irq = vio->vdev->rx_irq;
lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
if (IS_ERR(lp))
return PTR_ERR(lp);
vio->lp = lp;
return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);
void vio_ldc_free(struct vio_driver_state *vio)
{
ldc_free(vio->lp);
vio->lp = NULL;
kfree(vio->desc_buf);
vio->desc_buf = NULL;
vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);
void vio_port_up(struct vio_driver_state *vio)
{
unsigned long flags;
int err, state;
spin_lock_irqsave(&vio->lock, flags);
state = ldc_state(vio->lp);
err = 0;
if (state == LDC_STATE_INIT) {
err = ldc_bind(vio->lp);
if (err)
printk(KERN_WARNING "%s: Port %lu bind failed, "
"err=%d\n",
vio->name, vio->vdev->channel_id, err);
}
if (!err) {
if (ldc_mode(vio->lp) == LDC_MODE_RAW)
ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
else
err = ldc_connect(vio->lp);
if (err)
printk(KERN_WARNING "%s: Port %lu connect failed, "
"err=%d\n",
vio->name, vio->vdev->channel_id, err);
}
if (err) {
unsigned long expires = jiffies + HZ;
expires = round_jiffies(expires);
mod_timer(&vio->timer, expires);
}
spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);
static void vio_port_timer(struct timer_list *t)
{
struct vio_driver_state *vio = from_timer(vio, t, timer);
vio_port_up(vio);
}
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
u8 dev_class, struct vio_version *ver_table,
int ver_table_size, struct vio_driver_ops *ops,
char *name)
{
switch (dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
case VDEV_DISK:
case VDEV_DISK_SERVER:
case VDEV_CONSOLE_CON:
break;
default:
return -EINVAL;
}
if (dev_class == VDEV_NETWORK ||
dev_class == VDEV_NETWORK_SWITCH ||
dev_class == VDEV_DISK ||
dev_class == VDEV_DISK_SERVER) {
if (!ops || !ops->send_attr || !ops->handle_attr ||
!ops->handshake_complete)
return -EINVAL;
}
if (!ver_table || ver_table_size < 0)
return -EINVAL;
if (!name)
return -EINVAL;
spin_lock_init(&vio->lock);
vio->name = name;
vio->dev_class = dev_class;
vio->vdev = vdev;
vio->ver_table = ver_table;
vio->ver_table_entries = ver_table_size;
vio->ops = ops;
timer_setup(&vio->timer, vio_port_timer, 0);
return 0;
}
EXPORT_SYMBOL(vio_driver_init);
| linux-master | arch/sparc/kernel/viohs.c |
// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996, 2008 David S. Miller ([email protected])
* Copyright (C) 1996 Eddie C. Dost ([email protected])
* Copyright (C) 1997, 1998 Jakub Jelinek ([email protected])
*/
/*
* This file handles the architecture-dependent parts of process handling.
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>
#include "kstack.h"
/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
} else {
unsigned long pstate;
raw_local_irq_enable();
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
__asm__ __volatile__(
"rdpr %%pstate, %0\n\t"
"andn %0, %1, %0\n\t"
"wrpr %0, %%g0, %%pstate"
: "=&r" (pstate)
: "i" (PSTATE_IE));
if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
sun4v_cpu_yield();
/* If resumed by cpu_poke then we need to explicitly
* call scheduler_ipi().
*/
scheduler_poke();
}
/* Re-enable interrupts. */
__asm__ __volatile__(
"rdpr %%pstate, %0\n\t"
"or %0, %1, %0\n\t"
"wrpr %0, %%g0, %%pstate"
: "=&r" (pstate)
: "i" (PSTATE_IE));
raw_local_irq_disable();
}
}
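/* Note (added): the two asm sequences in arch_cpu_idle() are simply
 * "pstate &= ~PSTATE_IE" and "pstate |= PSTATE_IE" spelled out with
 * rdpr/wrpr, since %pstate can only be accessed through those
 * privileged register instructions.
 */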
#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
sched_preempt_enable_no_resched();
cpu_play_dead();
}
#endif
#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
struct reg_window32 __user *rw;
struct reg_window32 r_w;
__asm__ __volatile__ ("flushw");
rw = compat_ptr((unsigned int)regs->u_regs[14]);
if (copy_from_user (&r_w, rw, sizeof(r_w))) {
return;
}
printk("l0: %08x l1: %08x l2: %08x l3: %08x "
"l4: %08x l5: %08x l6: %08x l7: %08x\n",
r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
printk("i0: %08x i1: %08x i2: %08x i3: %08x "
"i4: %08x i5: %08x i6: %08x i7: %08x\n",
r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs) do { } while (0)
#endif
static void show_regwindow(struct pt_regs *regs)
{
struct reg_window __user *rw;
struct reg_window *rwk;
struct reg_window r_w;
if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
__asm__ __volatile__ ("flushw");
rw = (struct reg_window __user *)
(regs->u_regs[14] + STACK_BIAS);
rwk = (struct reg_window *)
(regs->u_regs[14] + STACK_BIAS);
if (!(regs->tstate & TSTATE_PRIV)) {
if (copy_from_user (&r_w, rw, sizeof(r_w))) {
return;
}
rwk = &r_w;
}
} else {
show_regwindow32(regs);
return;
}
printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
if (regs->tstate & TSTATE_PRIV)
printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}
void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
regs->tpc, regs->tnpc, regs->y, print_tainted());
printk("TPC: <%pS>\n", (void *) regs->tpc);
printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
regs->u_regs[3]);
printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
regs->u_regs[7]);
printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
regs->u_regs[11]);
printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
}
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
int this_cpu)
{
struct global_reg_snapshot *rp;
flushw_all();
rp = &global_cpu_snapshot[this_cpu].reg;
rp->tstate = regs->tstate;
rp->tpc = regs->tpc;
rp->tnpc = regs->tnpc;
rp->o7 = regs->u_regs[UREG_I7];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *rw;
rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
if (kstack_valid(tp, (unsigned long) rw)) {
rp->i7 = rw->ins[7];
rw = (struct reg_window *)
(rw->ins[6] + STACK_BIAS);
if (kstack_valid(tp, (unsigned long) rw))
rp->rpc = rw->ins[7];
}
} else {
rp->i7 = 0;
rp->rpc = 0;
}
rp->thread = tp;
}
/* In order to avoid hangs we do not try to synchronize with the
* global register dump client cpus. The last store they make is to
* the thread pointer, so do a short poll waiting for that to become
* non-NULL.
*/
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
int limit = 0;
while (!gp->thread && ++limit < 100) {
barrier();
udelay(1);
}
}
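/* Note (added): __global_reg_poll() bounds the wait at roughly 100us
 * (100 iterations of udelay(1)); a cpu that never answers just gets its
 * snapshot printed as zeroes/NULL instead of hanging the dump.
 */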
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
struct thread_info *tp = current_thread_info();
struct pt_regs *regs = get_irq_regs();
unsigned long flags;
int this_cpu, cpu;
if (!regs)
regs = tp->kregs;
spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
this_cpu = raw_smp_processor_id();
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
if (cpumask_test_cpu(this_cpu, mask) && this_cpu != exclude_cpu)
__global_reg_self(tp, regs, this_cpu);
smp_fetch_global_regs();
for_each_cpu(cpu, mask) {
struct global_reg_snapshot *gp;
if (cpu == exclude_cpu)
continue;
gp = &global_cpu_snapshot[cpu].reg;
__global_reg_poll(gp);
tp = gp->thread;
printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
(cpu == this_cpu ? '*' : ' '), cpu,
gp->tstate, gp->tpc, gp->tnpc,
((tp && tp->task) ? tp->task->comm : "NULL"),
((tp && tp->task) ? tp->task->pid : -1));
if (gp->tstate & TSTATE_PRIV) {
printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
(void *) gp->tpc,
(void *) gp->o7,
(void *) gp->i7,
(void *) gp->rpc);
} else {
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc);
}
touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_globreg(u8 key)
{
trigger_all_cpu_backtrace();
}
static const struct sysrq_key_op sparc_globalreg_op = {
.handler = sysrq_handle_globreg,
.help_msg = "global-regs(y)",
.action_msg = "Show Global CPU Regs",
};
static void __global_pmu_self(int this_cpu)
{
struct global_pmu_snapshot *pp;
int i, num;
if (!pcr_ops)
return;
pp = &global_cpu_snapshot[this_cpu].pmu;
num = 1;
if (tlb_type == hypervisor &&
sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
num = 4;
for (i = 0; i < num; i++) {
pp->pcr[i] = pcr_ops->read_pcr(i);
pp->pic[i] = pcr_ops->read_pic(i);
}
}
static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
int limit = 0;
while (!pp->pcr[0] && ++limit < 100) {
barrier();
udelay(1);
}
}
static void pmu_snapshot_all_cpus(void)
{
unsigned long flags;
int this_cpu, cpu;
spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
this_cpu = raw_smp_processor_id();
__global_pmu_self(this_cpu);
smp_fetch_global_pmu();
for_each_online_cpu(cpu) {
struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
__global_pmu_poll(pp);
printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
(cpu == this_cpu ? '*' : ' '), cpu,
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
static void sysrq_handle_globpmu(u8 key)
{
pmu_snapshot_all_cpus();
}
static const struct sysrq_key_op sparc_globalpmu_op = {
.handler = sysrq_handle_globpmu,
.help_msg = "global-pmu(x)",
.action_msg = "Show Global PMU Regs",
};
static int __init sparc_sysrq_init(void)
{
int ret = register_sysrq_key('y', &sparc_globalreg_op);
if (!ret)
ret = register_sysrq_key('x', &sparc_globalpmu_op);
return ret;
}
core_initcall(sparc_sysrq_init);
#endif
/* Free current thread data structures etc.. */
void exit_thread(struct task_struct *tsk)
{
struct thread_info *t = task_thread_info(tsk);
if (t->utraps) {
if (t->utraps[0] < 2)
kfree (t->utraps);
else
t->utraps[0]--;
}
}
void flush_thread(void)
{
struct thread_info *t = current_thread_info();
struct mm_struct *mm;
mm = t->task->mm;
if (mm)
tsb_context_switch(mm);
set_thread_wsaved(0);
/* Clear FPU register state. */
t->fpsaved[0] = 0;
}
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
bool stack_64bit = test_thread_64bit_stack(psp);
unsigned long fp, distance, rval;
if (stack_64bit) {
csp += STACK_BIAS;
psp += STACK_BIAS;
__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
fp += STACK_BIAS;
if (test_thread_flag(TIF_32BIT))
fp &= 0xffffffff;
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
/* Now align the stack as this is mandatory in the Sparc ABI
* due to how register windows work. This hides the
* restriction from thread libraries etc.
*/
csp &= ~15UL;
distance = fp - psp;
rval = (csp - distance);
if (raw_copy_in_user((void __user *)rval, (void __user *)psp, distance))
rval = 0;
else if (!stack_64bit) {
if (put_user(((u32)csp),
&(((struct reg_window32 __user *)rval)->ins[6])))
rval = 0;
} else {
if (put_user(((u64)csp - STACK_BIAS),
&(((struct reg_window __user *)rval)->ins[6])))
rval = 0;
else
rval = rval - STACK_BIAS;
}
return rval;
}
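/* Worked example (added, hypothetical addresses): if the parent's
 * window sits at psp=0x1000 with saved fp=0x1040, distance is 0x40.
 * For a requested child stack csp=0x2008, alignment gives 0x2000, the
 * 0x40-byte frame is copied to rval=0x1fc0, and the saved ins[6] in
 * that copy is rewritten to 0x2000 (minus STACK_BIAS for 64-bit
 * stacks) so the child's window chain points into its own stack.
 */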
/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
struct thread_info *t)
{
int i;
for (i = first_win; i < last_win; i++) {
t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
memcpy(&t->reg_window[i], &t->reg_window[i+1],
sizeof(struct reg_window));
}
}
void synchronize_user_stack(void)
{
struct thread_info *t = current_thread_info();
unsigned long window;
flush_user_windows();
if ((window = get_thread_wsaved()) != 0) {
window -= 1;
do {
struct reg_window *rwin = &t->reg_window[window];
int winsize = sizeof(struct reg_window);
unsigned long sp;
sp = t->rwbuf_stkptrs[window];
if (test_thread_64bit_stack(sp))
sp += STACK_BIAS;
else
winsize = sizeof(struct reg_window32);
if (!copy_to_user((char __user *)sp, rwin, winsize)) {
shift_window_buffer(window, get_thread_wsaved() - 1, t);
set_thread_wsaved(get_thread_wsaved() - 1);
}
} while (window--);
}
}
static void stack_unaligned(unsigned long sp)
{
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp);
}
static const char uwfault32[] = KERN_INFO \
"%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
static const char uwfault64[] = KERN_INFO \
"%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
void fault_in_user_windows(struct pt_regs *regs)
{
struct thread_info *t = current_thread_info();
unsigned long window;
flush_user_windows();
window = get_thread_wsaved();
if (likely(window != 0)) {
window -= 1;
do {
struct reg_window *rwin = &t->reg_window[window];
int winsize = sizeof(struct reg_window);
unsigned long sp, orig_sp;
orig_sp = sp = t->rwbuf_stkptrs[window];
if (test_thread_64bit_stack(sp))
sp += STACK_BIAS;
else
winsize = sizeof(struct reg_window32);
if (unlikely(sp & 0x7UL))
stack_unaligned(sp);
if (unlikely(copy_to_user((char __user *)sp,
rwin, winsize))) {
if (show_unhandled_signals)
printk_ratelimited(is_compat_task() ?
uwfault32 : uwfault64,
current->comm, current->pid,
sp, orig_sp,
regs->tpc,
regs->u_regs[UREG_I7]);
goto barf;
}
} while (window--);
}
set_thread_wsaved(0);
return;
barf:
set_thread_wsaved(window + 1);
force_sig(SIGSEGV);
}
/* Copy a Sparc thread. The fork() return value conventions
* under SunOS are nothing short of bletcherous:
* Parent --> %o0 == child's pid, %o1 == 0
* Child --> %o0 == parent's pid, %o1 == 1
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
struct thread_info *t = task_thread_info(p);
struct pt_regs *regs = current_pt_regs();
struct sparc_stackf *parent_sf;
unsigned long child_stack_sz;
char *child_trap_frame;
/* Calculate offset to stack_frame & pt_regs */
child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
child_trap_frame = (task_stack_page(p) +
(THREAD_SIZE - child_stack_sz));
t->new_child = 1;
t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
t->kregs = (struct pt_regs *) (child_trap_frame +
sizeof(struct sparc_stackf));
t->fpsaved[0] = 0;
if (unlikely(args->fn)) {
memset(child_trap_frame, 0, child_stack_sz);
__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
(current_pt_regs()->tstate + 1) & TSTATE_CWP;
t->kregs->u_regs[UREG_G1] = (unsigned long) args->fn;
t->kregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
return 0;
}
parent_sf = ((struct sparc_stackf *) regs) - 1;
memcpy(child_trap_frame, parent_sf, child_stack_sz);
if (t->flags & _TIF_32BIT) {
sp &= 0x00000000ffffffffUL;
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
}
t->kregs->u_regs[UREG_FP] = sp;
__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
(regs->tstate + 1) & TSTATE_CWP;
if (sp != regs->u_regs[UREG_FP]) {
unsigned long csp;
csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
if (!csp)
return -EFAULT;
t->kregs->u_regs[UREG_FP] = csp;
}
if (t->utraps)
t->utraps[0]++;
/* Set the return value for the child. */
t->kregs->u_regs[UREG_I0] = current->pid;
t->kregs->u_regs[UREG_I1] = 1;
/* Set the second return value for the parent. */
regs->u_regs[UREG_I1] = 0;
if (clone_flags & CLONE_SETTLS)
t->kregs->u_regs[UREG_G7] = tls;
return 0;
}
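/* Illustrative (added): with the register setup in copy_thread(), a
 * SunOS-style fork stub never compares pids -- it branches on the
 * second return value alone.  Roughly, in C-like pseudocode:
 *
 *	o0 = trap_fork();	// primary return value
 *	o1 = second_retval();	// 0 in the parent, 1 in the child
 *	if (o1)
 *		-> child path, o0 holds the parent's pid
 *	else
 *		-> parent path, o0 holds the child's pid
 */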
/* TIF_MCDPER in thread info flags for current task is updated lazily upon
* a context switch. Update this flag in current task's thread flags
* before dup so the dup'd task will inherit the current TIF_MCDPER flag.
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
if (adi_capable()) {
register unsigned long tmp_mcdper;
__asm__ __volatile__(
".word 0x83438000\n\t" /* rd %mcdper, %g1 */
"mov %%g1, %0\n\t"
: "=r" (tmp_mcdper)
:
: "g1");
if (tmp_mcdper)
set_thread_flag(TIF_MCDPER);
else
clear_thread_flag(TIF_MCDPER);
}
*dst = *src;
return 0;
}
unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
struct thread_info *tp;
struct reg_window *rw;
unsigned long ret = 0;
int count = 0;
tp = task_thread_info(task);
bias = STACK_BIAS;
fp = task_thread_info(task)->ksp + bias;
do {
if (!kstack_valid(tp, fp))
break;
rw = (struct reg_window *) fp;
pc = rw->ins[7];
if (!in_sched_functions(pc)) {
ret = pc;
goto out;
}
fp = rw->ins[6] + bias;
} while (++count < 16);
out:
return ret;
}
| linux-master | arch/sparc/kernel/process_64.c |
// SPDX-License-Identifier: GPL-2.0
/* reboot.c: reboot/shutdown/halt/poweroff handling
*
* Copyright (C) 2008 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/setup.h>
/* sysctl - toggle power-off restriction for serial console
* systems in machine_power_off()
*/
int scons_pwroff = 1;
/* This isn't actually used, it exists merely to satisfy the
* reference in kernel/sys.c
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
void machine_power_off(void)
{
if (!of_node_is_type(of_console_device, "serial") || scons_pwroff)
prom_halt_power_off();
prom_halt();
}
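/* Truth table (added) for the check in machine_power_off():
 *
 *	console on serial?	scons_pwroff	result
 *	no			any		power off via PROM
 *	yes			1 (default)	power off via PROM
 *	yes			0		drop to PROM halt only
 */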
void machine_halt(void)
{
prom_halt();
panic("Halt failed!");
}
void machine_restart(char *cmd)
{
char *p;
p = strchr(reboot_command, '\n');
if (p)
*p = 0;
if (cmd)
prom_reboot(cmd);
if (*reboot_command)
prom_reboot(reboot_command);
prom_reboot("");
panic("Reboot failed!");
}
| linux-master | arch/sparc/kernel/reboot.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interrupt request handling routines. On the
* Sparc the IRQs are basically 'cast in stone'
* and you are supposed to probe the prom's device
* node trees to find out who's got which IRQ.
*
* Copyright (C) 1995 David S. Miller ([email protected])
* Copyright (C) 1995 Miguel de Icaza ([email protected])
* Copyright (C) 1995,2002 Pete A. Zaitcev ([email protected])
* Copyright (C) 1996 Dave Redman ([email protected])
* Copyright (C) 1998-2000 Anton Blanchard ([email protected])
*/
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/pcic.h>
#include <asm/leon.h>
#include "kernel.h"
#include "irq.h"
/* platform specific irq setup */
struct sparc_config sparc_config;
unsigned long arch_local_irq_save(void)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"or %0, %2, %1\n\t"
"wr %1, 0, %%psr\n\t"
"nop; nop; nop\n"
: "=&r" (retval), "=r" (tmp)
: "i" (PSR_PIL)
: "memory");
return retval;
}
EXPORT_SYMBOL(arch_local_irq_save);
void arch_local_irq_enable(void)
{
unsigned long tmp;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"andn %0, %1, %0\n\t"
"wr %0, 0, %%psr\n\t"
"nop; nop; nop\n"
: "=&r" (tmp)
: "i" (PSR_PIL)
: "memory");
}
EXPORT_SYMBOL(arch_local_irq_enable);
void arch_local_irq_restore(unsigned long old_psr)
{
unsigned long tmp;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"and %2, %1, %2\n\t"
"andn %0, %1, %0\n\t"
"wr %0, %2, %%psr\n\t"
"nop; nop; nop\n"
: "=&r" (tmp)
: "i" (PSR_PIL), "r" (old_psr)
: "memory");
}
EXPORT_SYMBOL(arch_local_irq_restore);
/*
* Dave Redman ([email protected])
*
* IRQ numbers.. These are no longer restricted to 15..
*
* this is done to enable SBUS cards and onboard IO to be masked
* correctly. using the interrupt level isn't good enough.
*
* For example:
* A device interrupting at sbus level6 and the Floppy both come in
* at IRQ11, but enabling and disabling them requires writing to
* different bits in the SLAVIO/SEC.
*
* As a result of these changes sun4m machines could now support
* directed CPU interrupts using the existing enable/disable irq code
* with tweaks.
*
* Sun4d complicates things even further. IRQ numbers are arbitrary
* 32-bit values in that case. Since this is similar to sparc64,
* we adopt a virtual IRQ numbering scheme as is done there.
* Virtual interrupt numbers are allocated by build_irq(). So NR_IRQS
* just becomes a limit of how many interrupt sources we can handle in
* a single system. Even fully loaded SS2000 machines top off at
* about 32 interrupt sources or so, therefore a NR_IRQS value of 64
* is more than enough.
*
* We keep a map of per-PIL enable interrupts. These get wired
* up via the irq_chip->startup() method which gets invoked by
* the generic IRQ layer during request_irq().
*/
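/* Example flow (added for illustration): a platform's build_device_irq
 * hook typically maps a hardware interrupt to a virtual one like
 *
 *	irq = irq_alloc(real_irq, pil);
 *	irq_set_chip_and_handler_name(irq, chip, handle_level_irq, "level");
 *
 * and the chip's ->irq_startup() then calls irq_link(irq) so that
 * handler_irq() can reach the bucket through irq_map[pil].
 */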
/* Table of allocated irqs. Unused entries have irq == 0 */
static struct irq_bucket irq_table[NR_IRQS];
/* Protect access to irq_table */
static DEFINE_SPINLOCK(irq_table_lock);
/* Map between the irq identifier used in hw to the irq_bucket. */
struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
/* Protect access to irq_map */
static DEFINE_SPINLOCK(irq_map_lock);
/* Allocate a new irq from the irq_table */
unsigned int irq_alloc(unsigned int real_irq, unsigned int pil)
{
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&irq_table_lock, flags);
for (i = 1; i < NR_IRQS; i++) {
if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil)
goto found;
}
for (i = 1; i < NR_IRQS; i++) {
if (!irq_table[i].irq)
break;
}
if (i < NR_IRQS) {
irq_table[i].real_irq = real_irq;
irq_table[i].irq = i;
irq_table[i].pil = pil;
} else {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
i = 0;
}
found:
spin_unlock_irqrestore(&irq_table_lock, flags);
return i;
}
/* Based on a single pil handler_irq may need to call several
* interrupt handlers. Use irq_map as entry to irq_table,
* and let each entry in irq_table point to the next entry.
*/
void irq_link(unsigned int irq)
{
struct irq_bucket *p;
unsigned long flags;
unsigned int pil;
BUG_ON(irq >= NR_IRQS);
spin_lock_irqsave(&irq_map_lock, flags);
p = &irq_table[irq];
pil = p->pil;
BUG_ON(pil >= SUN4D_MAX_IRQ);
p->next = irq_map[pil];
irq_map[pil] = p;
spin_unlock_irqrestore(&irq_map_lock, flags);
}
void irq_unlink(unsigned int irq)
{
struct irq_bucket *p, **pnext;
unsigned long flags;
BUG_ON(irq >= NR_IRQS);
spin_lock_irqsave(&irq_map_lock, flags);
p = &irq_table[irq];
BUG_ON(p->pil >= SUN4D_MAX_IRQ);
pnext = &irq_map[p->pil];
while (*pnext != p)
pnext = &(*pnext)->next;
*pnext = p->next;
spin_unlock_irqrestore(&irq_map_lock, flags);
}
/* /proc/interrupts printing */
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
seq_printf(p, " IPI rescheduling interrupts\n");
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
seq_printf(p, " IPI function call interrupts\n");
#endif
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_data(j).counter);
seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
void handler_irq(unsigned int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
struct irq_bucket *p;
BUG_ON(pil > 15);
old_regs = set_irq_regs(regs);
irq_enter();
p = irq_map[pil];
while (p) {
struct irq_bucket *next = p->next;
generic_handle_irq(p->irq);
p = next;
}
irq_exit();
set_irq_regs(old_regs);
}
#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
static unsigned int floppy_irq;
int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
{
unsigned int cpu_irq;
int err;
err = request_irq(irq, irq_handler, 0, "floppy", NULL);
if (err)
return -1;
/* Save for later use in floppy interrupt handler */
floppy_irq = irq;
cpu_irq = (irq & (NR_IRQS - 1));
/* Dork with trap table if we get this far. */
#define INSTANTIATE(table) \
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
SPARC_BRANCH((unsigned long) floppy_hardint, \
(unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
INSTANTIATE(sparc_ttable)
#if defined CONFIG_SMP
if (sparc_cpu_model != sparc_leon) {
struct tt_entry *trap_table;
trap_table = &trapbase_cpu1;
INSTANTIATE(trap_table)
trap_table = &trapbase_cpu2;
INSTANTIATE(trap_table)
trap_table = &trapbase_cpu3;
INSTANTIATE(trap_table)
}
#endif
#undef INSTANTIATE
/*
* XXX The correct thing would be to flush only the I- and D-cache lines
* which contain the handler in question. But as of the time of writing
* we have no CPU-neutral interface for fine-grained flushes.
*/
flush_cache_all();
return 0;
}
EXPORT_SYMBOL(sparc_floppy_request_irq);
/*
* These variables are used to access state from the assembler
* interrupt handler, floppy_hardint, so we cannot put these in
* the floppy driver image because that would not work in the
* modular case.
*/
volatile unsigned char *fdc_status;
EXPORT_SYMBOL(fdc_status);
char *pdma_vaddr;
EXPORT_SYMBOL(pdma_vaddr);
unsigned long pdma_size;
EXPORT_SYMBOL(pdma_size);
volatile int doing_pdma;
EXPORT_SYMBOL(doing_pdma);
char *pdma_base;
EXPORT_SYMBOL(pdma_base);
unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);
/* Use the generic irq support to call floppy_interrupt
* which was setup using request_irq() in sparc_floppy_request_irq().
* We only have one floppy interrupt so we do not need to check
* for additional handlers being wired up by irq_link()
*/
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
irq_enter();
generic_handle_irq(floppy_irq);
irq_exit();
set_irq_regs(old_regs);
}
#endif
/* djhr
* This could probably be made indirect too and assigned in the CPU
* bits of the code. That would be much nicer I think and would also
* fit in with the idea of being able to tune your kernel for your machine
* by removing unrequired machine and device support.
*
*/
void __init init_IRQ(void)
{
switch (sparc_cpu_model) {
case sun4m:
pcic_probe();
if (pcic_present())
sun4m_pci_init_IRQ();
else
sun4m_init_IRQ();
break;
case sun4d:
sun4d_init_IRQ();
break;
case sparc_leon:
leon_init_IRQ();
break;
default:
prom_printf("Cannot initialize IRQs on this Sun machine...");
break;
}
}
| linux-master | arch/sparc/kernel/irq_32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sun4m irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
* Copyright (C) 1995 David S. Miller ([email protected])
* Copyright (C) 1995 Miguel de Icaza ([email protected])
* Copyright (C) 1995 Pete A. Zaitcev ([email protected])
* Copyright (C) 1996 Dave Redman ([email protected])
*/
#include <linux/slab.h>
#include <linux/sched/debug.h>
#include <linux/pgtable.h>
#include <asm/timer.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include "irq.h"
#include "kernel.h"
/* Sample sun4m IRQ layout:
*
* 0x22 - Power
* 0x24 - ESP SCSI
* 0x26 - Lance ethernet
* 0x2b - Floppy
* 0x2c - Zilog uart
* 0x32 - SBUS level 0
* 0x33 - Parallel port, SBUS level 1
* 0x35 - SBUS level 2
* 0x37 - SBUS level 3
* 0x39 - Audio, Graphics card, SBUS level 4
* 0x3b - SBUS level 5
* 0x3d - SBUS level 6
*
* Each interrupt source has a mask bit in the interrupt registers.
* When the mask bit is set, this blocks interrupt delivery. So you
* clear the bit to enable the interrupt.
*
* Interrupts numbered less than 0x10 are software triggered interrupts
* and unused by Linux.
*
* Interrupt level assignment on sun4m:
*
* level source
* ------------------------------------------------------------
* 1 softint-1
* 2 softint-2, VME/SBUS level 1
* 3 softint-3, VME/SBUS level 2
* 4 softint-4, onboard SCSI
* 5 softint-5, VME/SBUS level 3
* 6 softint-6, onboard ETHERNET
* 7 softint-7, VME/SBUS level 4
* 8 softint-8, onboard VIDEO
* 9 softint-9, VME/SBUS level 5, Module Interrupt
* 10 softint-10, system counter/timer
* 11 softint-11, VME/SBUS level 6, Floppy
* 12 softint-12, Keyboard/Mouse, Serial
* 13 softint-13, VME/SBUS level 7, ISDN Audio
* 14 softint-14, per-processor counter/timer
* 15 softint-15, Asynchronous Errors (broadcast)
*
* Each interrupt source is masked distinctly in the sun4m interrupt
* registers. The PIL level alone is therefore ambiguous, since multiple
* interrupt sources map to a single PIL.
*
* This ambiguity is resolved in the 'intr' property for device nodes
* in the OF device tree. Each 'intr' property entry is composed of
* two 32-bit words. The first word is the IRQ priority value, which
* is what we're intersted in. The second word is the IRQ vector, which
* is unused.
*
* The low 4 bits of the IRQ priority indicate the PIL, and the upper
* 4 bits indicate onboard vs. SBUS leveled vs. VME leveled. 0x20
* means onboard, 0x30 means SBUS leveled, and 0x40 means VME leveled.
*
* For example, an 'intr' IRQ priority value of 0x24 is onboard SCSI
* whereas a value of 0x33 is SBUS level 2. Here are some sample
* 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and
* Tadpole S3 GX systems.
*
* esp: 0x24 onboard ESP SCSI
* le: 0x26 onboard Lance ETHERNET
* p9100: 0x32 SBUS level 1 P9100 video
* bpp: 0x33 SBUS level 2 BPP parallel port device
* DBRI: 0x39 SBUS level 5 DBRI ISDN audio
* SUNW,leo: 0x39 SBUS level 5 LEO video
* pcmcia: 0x3b SBUS level 6 PCMCIA controller
* uctrl: 0x3b SBUS level 6 UCTRL device
* modem: 0x3d SBUS level 7 MODEM
* zs: 0x2c onboard keyboard/mouse/serial
* floppy: 0x2b onboard Floppy
* power: 0x22 onboard power device (XXX unknown mask bit XXX)
*/
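/* Worked decode (added) of the 'intr' priority encoding above:
 * 0x24 = 0x20 | 4  -> onboard, PIL 4 (SCSI, mask SUN4M_INT_SCSI);
 * 0x33 = 0x30 | 3  -> SBUS,    PIL 3 (mask SUN4M_INT_SBUS(1));
 * 0x2b = 0x20 | 11 -> onboard, PIL 11 (floppy, mask SUN4M_INT_FLOPPY).
 * sun4m_build_device_irq() extracts the PIL with (real_irq & 0xf) and
 * looks the enable mask up in sun4m_imask[real_irq].
 */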
/* Code in entry.S needs to get at these register mappings. */
struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
struct sun4m_irq_global __iomem *sun4m_irq_global;
struct sun4m_handler_data {
bool percpu;
long mask;
};
/* Dave Redman ([email protected])
* The sun4m interrupt registers.
*/
#define SUN4M_INT_ENABLE 0x80000000
#define SUN4M_INT_E14 0x00000080
#define SUN4M_INT_E10 0x00080000
#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
SUN4M_INT_M2S_WRITE_ERR | \
SUN4M_INT_ECC_ERR | \
SUN4M_INT_VME_ERR)
#define SUN4M_INT_SBUS(x) (1 << (x+7))
#define SUN4M_INT_VME(x) (1 << (x))
/* Interrupt levels used by OBP */
#define OBP_INT_LEVEL_SOFT 0x10
#define OBP_INT_LEVEL_ONBOARD 0x20
#define OBP_INT_LEVEL_SBUS 0x30
#define OBP_INT_LEVEL_VME 0x40
#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
#define SUN4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14)
static unsigned long sun4m_imask[0x50] = {
/* 0x00 - SMP */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
SUN4M_SOFT_INT(6), SUN4M_SOFT_INT(7),
SUN4M_SOFT_INT(8), SUN4M_SOFT_INT(9),
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
/* 0x10 - soft */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
SUN4M_SOFT_INT(6), SUN4M_SOFT_INT(7),
SUN4M_SOFT_INT(8), SUN4M_SOFT_INT(9),
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
/* 0x20 - onboard */
0, 0, 0, 0,
SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0,
SUN4M_INT_VIDEO, SUN4M_INT_MODULE,
SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY,
(SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),
SUN4M_INT_AUDIO, SUN4M_INT_E14, SUN4M_INT_MODULE_ERR,
/* 0x30 - sbus */
0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1),
0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3),
0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5),
0, SUN4M_INT_SBUS(6), 0, 0,
/* 0x40 - vme */
0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1),
0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3),
0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5),
0, SUN4M_INT_VME(6), 0, 0
};
static void sun4m_mask_irq(struct irq_data *data)
{
struct sun4m_handler_data *handler_data;
int cpu = smp_processor_id();
handler_data = irq_data_get_irq_handler_data(data);
if (handler_data->mask) {
unsigned long flags;
local_irq_save(flags);
if (handler_data->percpu) {
sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set);
} else {
sbus_writel(handler_data->mask, &sun4m_irq_global->mask_set);
}
local_irq_restore(flags);
}
}
static void sun4m_unmask_irq(struct irq_data *data)
{
struct sun4m_handler_data *handler_data;
int cpu = smp_processor_id();
handler_data = irq_data_get_irq_handler_data(data);
if (handler_data->mask) {
unsigned long flags;
local_irq_save(flags);
if (handler_data->percpu) {
sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear);
} else {
sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear);
}
local_irq_restore(flags);
}
}
static unsigned int sun4m_startup_irq(struct irq_data *data)
{
irq_link(data->irq);
sun4m_unmask_irq(data);
return 0;
}
static void sun4m_shutdown_irq(struct irq_data *data)
{
sun4m_mask_irq(data);
irq_unlink(data->irq);
}
static struct irq_chip sun4m_irq = {
.name = "sun4m",
.irq_startup = sun4m_startup_irq,
.irq_shutdown = sun4m_shutdown_irq,
.irq_mask = sun4m_mask_irq,
.irq_unmask = sun4m_unmask_irq,
};
static unsigned int sun4m_build_device_irq(struct platform_device *op,
unsigned int real_irq)
{
struct sun4m_handler_data *handler_data;
unsigned int irq;
unsigned int pil;
if (real_irq >= OBP_INT_LEVEL_VME) {
prom_printf("Bogus sun4m IRQ %u\n", real_irq);
prom_halt();
}
pil = (real_irq & 0xf);
irq = irq_alloc(real_irq, pil);
if (irq == 0)
goto out;
handler_data = irq_get_handler_data(irq);
if (unlikely(handler_data))
goto out;
handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC);
if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n");
prom_halt();
}
handler_data->mask = sun4m_imask[real_irq];
handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD;
irq_set_chip_and_handler_name(irq, &sun4m_irq,
handle_level_irq, "level");
irq_set_handler_data(irq, handler_data);
out:
return irq;
}
struct sun4m_timer_percpu {
u32 l14_limit;
u32 l14_count;
u32 l14_limit_noclear;
u32 user_timer_start_stop;
};
static struct sun4m_timer_percpu __iomem *timers_percpu[SUN4M_NCPUS];
struct sun4m_timer_global {
u32 l10_limit;
u32 l10_count;
u32 l10_limit_noclear;
u32 reserved;
u32 timer_config;
};
static struct sun4m_timer_global __iomem *timers_global;
static void sun4m_clear_clock_irq(void)
{
sbus_readl(&timers_global->l10_limit);
}
void sun4m_nmi(struct pt_regs *regs)
{
unsigned long afsr, afar, si;
printk(KERN_ERR "Aieee: sun4m NMI received!\n");
/* XXX HyperSparc hack XXX */
__asm__ __volatile__("mov 0x500, %%g1\n\t"
"lda [%%g1] 0x4, %0\n\t"
"mov 0x600, %%g1\n\t"
"lda [%%g1] 0x4, %1\n\t" :
"=r" (afsr), "=r" (afar));
printk(KERN_ERR "afsr=%08lx afar=%08lx\n", afsr, afar);
si = sbus_readl(&sun4m_irq_global->pending);
printk(KERN_ERR "si=%08lx\n", si);
if (si & SUN4M_INT_MODULE_ERR)
printk(KERN_ERR "Module async error\n");
if (si & SUN4M_INT_M2S_WRITE_ERR)
printk(KERN_ERR "MBus/SBus async error\n");
if (si & SUN4M_INT_ECC_ERR)
printk(KERN_ERR "ECC memory error\n");
if (si & SUN4M_INT_VME_ERR)
printk(KERN_ERR "VME async error\n");
printk(KERN_ERR "you lose buddy boy...\n");
show_regs(regs);
prom_halt();
}
void sun4m_unmask_profile_irq(void)
{
unsigned long flags;
local_irq_save(flags);
sbus_writel(sun4m_imask[SUN4M_PROFILE_IRQ], &sun4m_irq_global->mask_clear);
local_irq_restore(flags);
}
void sun4m_clear_profile_irq(int cpu)
{
sbus_readl(&timers_percpu[cpu]->l14_limit);
}
static void sun4m_load_profile_irq(int cpu, unsigned int limit)
{
unsigned int value = limit ? timer_value(limit) : 0;
sbus_writel(value, &timers_percpu[cpu]->l14_limit);
}
static void __init sun4m_init_timers(void)
{
struct device_node *dp = of_find_node_by_name(NULL, "counter");
int i, err, len, num_cpu_timers;
unsigned int irq;
const u32 *addr;
if (!dp) {
printk(KERN_ERR "sun4m_init_timers: No 'counter' node.\n");
return;
}
addr = of_get_property(dp, "address", &len);
of_node_put(dp);
if (!addr) {
printk(KERN_ERR "sun4m_init_timers: No 'address' prop.\n");
return;
}
num_cpu_timers = (len / sizeof(u32)) - 1;
for (i = 0; i < num_cpu_timers; i++) {
timers_percpu[i] = (void __iomem *)
(unsigned long) addr[i];
}
timers_global = (void __iomem *)
(unsigned long) addr[num_cpu_timers];
/* Every per-cpu timer works in timer mode */
sbus_writel(0x00000000, &timers_global->timer_config);
#ifdef CONFIG_SMP
sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */
sparc_config.features |= FEAT_L14_ONESHOT;
#else
sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */
sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
sparc_config.features |= FEAT_L10_CLOCKSOURCE;
sbus_writel(timer_value(sparc_config.cs_period),
&timers_global->l10_limit);
master_l10_counter = &timers_global->l10_count;
irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ);
err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
if (err) {
printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
err);
return;
}
for (i = 0; i < num_cpu_timers; i++)
sbus_writel(0, &timers_percpu[i]->l14_limit);
if (num_cpu_timers == 4)
sbus_writel(SUN4M_INT_E14, &sun4m_irq_global->mask_set);
#ifdef CONFIG_SMP
{
unsigned long flags;
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
/* For SMP we use the level 14 ticker; however, the bootup code
* has copied the firmware's level 14 vector into the boot cpu's
* trap table, so we must fix this now or we get squashed.
*/
local_irq_save(flags);
trap_table->inst_one = lvl14_save[0];
trap_table->inst_two = lvl14_save[1];
trap_table->inst_three = lvl14_save[2];
trap_table->inst_four = lvl14_save[3];
local_ops->cache_all();
local_irq_restore(flags);
}
#endif
}
void __init sun4m_init_IRQ(void)
{
struct device_node *dp = of_find_node_by_name(NULL, "interrupt");
int len, i, mid, num_cpu_iregs;
const u32 *addr;
if (!dp) {
printk(KERN_ERR "sun4m_init_IRQ: No 'interrupt' node.\n");
return;
}
addr = of_get_property(dp, "address", &len);
of_node_put(dp);
if (!addr) {
printk(KERN_ERR "sun4m_init_IRQ: No 'address' prop.\n");
return;
}
num_cpu_iregs = (len / sizeof(u32)) - 1;
for (i = 0; i < num_cpu_iregs; i++) {
sun4m_irq_percpu[i] = (void __iomem *)
(unsigned long) addr[i];
}
sun4m_irq_global = (void __iomem *)
(unsigned long) addr[num_cpu_iregs];
local_irq_disable();
sbus_writel(~SUN4M_INT_MASKALL, &sun4m_irq_global->mask_set);
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
sbus_writel(~0x17fff, &sun4m_irq_percpu[mid]->clear);
if (num_cpu_iregs == 4)
sbus_writel(0, &sun4m_irq_global->interrupt_target);
sparc_config.init_timers = sun4m_init_timers;
sparc_config.build_device_irq = sun4m_build_device_irq;
sparc_config.clock_rate = SBUS_CLOCK_RATE;
sparc_config.clear_clock_irq = sun4m_clear_clock_irq;
sparc_config.load_profile_irq = sun4m_load_profile_irq;
/* Cannot enable interrupts until OBP ticker is disabled. */
}
| linux-master | arch/sparc/kernel/sun4m_irq.c |
// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc/kernel/sys_sparc.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/sparc
* platform.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include "systbls.h"
/* #define DEBUG_UNIMP_SYSCALL */
/* XXX Make this per-binary type, this way we can detect the type of
* XXX a binary. Every Sparc executable calls this very early on.
*/
SYSCALL_DEFINE0(getpagesize)
{
return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_unmapped_area_info info;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
/* See asm-sparc/uaccess.h */
if (len > TASK_SIZE - PAGE_SIZE)
return -ENOMEM;
if (!addr)
addr = TASK_UNMAPPED_BASE;
info.flags = 0;
info.length = len;
info.low_limit = addr;
info.high_limit = TASK_SIZE;
info.align_mask = (flags & MAP_SHARED) ?
(PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
SYSCALL_DEFINE0(sparc_pipe)
{
int fd[2];
int error;
error = do_pipe_flags(fd, 0);
if (error)
goto out;
current_pt_regs()->u_regs[UREG_I1] = fd[1];
error = fd[0];
out:
return error;
}
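/* Note (added): sparc's pipe returns both descriptors in registers --
 * the read end as the normal return value in %o0 and the write end in
 * %o1 (UREG_I1 above) -- so the libc wrapper stores %o0/%o1 straight
 * into the caller's fd array instead of passing a pointer to the
 * kernel.
 */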
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
/* See asm-sparc/uaccess.h */
if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
return -EINVAL;
return 0;
}
/* Linux version of mmap */
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what
* PAGE_SIZE we have.
*/
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
pgoff >> (PAGE_SHIFT - 12));
}
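/* Worked example (added): mmap2's offset argument is always in 4K
 * units.  With 4K pages (PAGE_SHIFT == 12, the sparc32 case) the shift
 * is zero and pgoff passes through unchanged; if PAGE_SIZE were 8K
 * (PAGE_SHIFT == 13), pgoff=6 (6 x 4K) would become 3 (3 x 8K pages).
 */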
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
/* no alignment check? */
return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
SYSCALL_DEFINE5(sparc_remap_file_pages, unsigned long, start, unsigned long, size,
unsigned long, prot, unsigned long, pgoff,
unsigned long, flags)
{
/* This works on an existing mmap so we don't need to validate
* the range as that was done at the original mmap call.
*/
return sys_remap_file_pages(start, size, prot,
(pgoff >> (PAGE_SHIFT - 12)), flags);
}
SYSCALL_DEFINE0(nis_syscall)
{
static int count = 0;
struct pt_regs *regs = current_pt_regs();
if (count++ > 5)
return -ENOSYS;
printk ("%s[%d]: Unimplemented SPARC system call %d\n",
current->comm, task_pid_nr(current), (int)regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
show_regs (regs);
#endif
return -ENOSYS;
}
/* #define DEBUG_SPARC_BREAKPOINT */
asmlinkage void
sparc_breakpoint (struct pt_regs *regs)
{
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
}
SYSCALL_DEFINE3(sparc_sigaction, int, sig,
struct old_sigaction __user *,act,
struct old_sigaction __user *,oact)
{
WARN_ON_ONCE(sig >= 0);
return sys_sigaction(-sig, act, oact);
}
SYSCALL_DEFINE5(rt_sigaction, int, sig,
const struct sigaction __user *, act,
struct sigaction __user *, oact,
void __user *, restorer,
size_t, sigsetsize)
{
struct k_sigaction new_ka, old_ka;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (act) {
new_ka.ka_restorer = restorer;
if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
return -EFAULT;
}
return ret;
}
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
up_read(&uts_sem);
if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem);
return err;
}
| linux-master | arch/sparc/kernel/sys_sparc_32.c |
// SPDX-License-Identifier: GPL-2.0
/* visemul.c: Emulation of VIS instructions.
*
* Copyright (C) 2006 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/fpumacro.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
/* OPF field of various VIS instructions. */
/* 000111011 - four 16-bit packs */
#define FPACK16_OPF 0x03b
/* 000111010 - two 32-bit packs */
#define FPACK32_OPF 0x03a
/* 000111101 - two 16-bit fixed packs (from two 32-bit words) */
#define FPACKFIX_OPF 0x03d
/* 001001101 - four 16-bit expands */
#define FEXPAND_OPF 0x04d
/* 001001011 - two 32-bit merges */
#define FPMERGE_OPF 0x04b
/* 000110001 - 8-by-16-bit partitioned product */
#define FMUL8x16_OPF 0x031
/* 000110011 - 8-by-16-bit upper alpha partitioned product */
#define FMUL8x16AU_OPF 0x033
/* 000110101 - 8-by-16-bit lower alpha partitioned product */
#define FMUL8x16AL_OPF 0x035
/* 000110110 - upper 8-by-16-bit partitioned product */
#define FMUL8SUx16_OPF 0x036
/* 000110111 - lower unsigned 8-by-16-bit partitioned product */
#define FMUL8ULx16_OPF 0x037
/* 000111000 - upper 8-by-16-bit partitioned product */
#define FMULD8SUx16_OPF 0x038
/* 000111001 - lower unsigned 8-by-16-bit partitioned product */
#define FMULD8ULx16_OPF 0x039
/* 000101000 - four 16-bit compare; set rd if src1 > src2 */
#define FCMPGT16_OPF 0x028
/* 000101100 - two 32-bit compare; set rd if src1 > src2 */
#define FCMPGT32_OPF 0x02c
/* 000100000 - four 16-bit compare; set rd if src1 <= src2 */
#define FCMPLE16_OPF 0x020
/* 000100100 - two 32-bit compare; set rd if src1 <= src2 */
#define FCMPLE32_OPF 0x024
/* 000100010 - four 16-bit compare; set rd if src1 != src2 */
#define FCMPNE16_OPF 0x022
/* 000100110 - two 32-bit compare; set rd if src1 != src2 */
#define FCMPNE32_OPF 0x026
/* 000101010 - four 16-bit compare; set rd if src1 == src2 */
#define FCMPEQ16_OPF 0x02a
/* 000101110 - two 32-bit compare; set rd if src1 == src2 */
#define FCMPEQ32_OPF 0x02e
/* 000000000 - Eight 8-bit edge boundary processing */
#define EDGE8_OPF 0x000
/* 000000001 - Eight 8-bit edge boundary processing, no CC */
#define EDGE8N_OPF 0x001
/* 000000010 - Eight 8-bit edge boundary processing, little-endian */
#define EDGE8L_OPF 0x002
/* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */
#define EDGE8LN_OPF 0x003
/* 000000100 - Four 16-bit edge boundary processing */
#define EDGE16_OPF 0x004
/* 000000101 - Four 16-bit edge boundary processing, no CC */
#define EDGE16N_OPF 0x005
/* 000000110 - Four 16-bit edge boundary processing, little-endian */
#define EDGE16L_OPF 0x006
/* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */
#define EDGE16LN_OPF 0x007
/* 000001000 - Two 32-bit edge boundary processing */
#define EDGE32_OPF 0x008
/* 000001001 - Two 32-bit edge boundary processing, no CC */
#define EDGE32N_OPF 0x009
/* 000001010 - Two 32-bit edge boundary processing, little-endian */
#define EDGE32L_OPF 0x00a
/* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */
#define EDGE32LN_OPF 0x00b
/* 000111110 - distance between 8 8-bit components */
#define PDIST_OPF 0x03e
/* 000010000 - convert 8-bit 3-D address to blocked byte address */
#define ARRAY8_OPF 0x010
/* 000010010 - convert 16-bit 3-D address to blocked byte address */
#define ARRAY16_OPF 0x012
/* 000010100 - convert 32-bit 3-D address to blocked byte address */
#define ARRAY32_OPF 0x014
/* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */
#define BMASK_OPF 0x019
/* 001001100 - Permute bytes as specified by GSR.MASK */
#define BSHUFFLE_OPF 0x04c
#define VIS_OPF_SHIFT 5
#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT)
#define RS1(INSN) (((INSN) >> 14) & 0x1f)
#define RS2(INSN) (((INSN) >> 0) & 0x1f)
#define RD(INSN) (((INSN) >> 25) & 0x1f)
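/* Illustrative sketch, not part of the original file: how the OPF and
 * register fields are pulled out of an instruction word.  The function
 * and its pr_info() call are hypothetical and kept out of the build;
 * only the masks and shifts above are real.
 */
#if 0
static void vis_decode_example(unsigned int insn)
{
	unsigned int opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;

	/* Bits 25-29 name rd, bits 14-18 name rs1, bits 0-4 name rs2. */
	pr_info("opf=%#x rs1=%u rs2=%u rd=%u\n",
		opf, RS1(insn), RS2(insn), RD(insn));
}
#endif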
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd, int from_kernel)
{
if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if (from_kernel != 0)
__asm__ __volatile__("flushw");
else
flushw_user();
}
}
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value, fp;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
fp = regs->u_regs[UREG_FP];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(fp + STACK_BIAS);
value = win->locals[reg - 16];
} else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(fp + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
}
static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
struct pt_regs *regs)
{
unsigned long fp = regs->u_regs[UREG_FP];
BUG_ON(reg < 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
return (unsigned long __user *)&win32->locals[reg - 16];
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
}
}
static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg,
struct pt_regs *regs)
{
BUG_ON(reg >= 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
return ®s->u_regs[reg];
}
static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
{
if (rd < 16) {
unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs);
*rd_kern = val;
} else {
unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);
if (!test_thread_64bit_stack(regs->u_regs[UREG_FP]))
__put_user((u32)val, (u32 __user *)rd_user);
else
__put_user(val, rd_user);
}
}
static inline unsigned long fpd_regval(struct fpustate *f,
unsigned int insn_regnum)
{
insn_regnum = (((insn_regnum & 1) << 5) |
(insn_regnum & 0x1e));
return *(unsigned long *) &f->regs[insn_regnum];
}
static inline unsigned long *fpd_regaddr(struct fpustate *f,
unsigned int insn_regnum)
{
insn_regnum = (((insn_regnum & 1) << 5) |
(insn_regnum & 0x1e));
return (unsigned long *) &f->regs[insn_regnum];
}
static inline unsigned int fps_regval(struct fpustate *f,
unsigned int insn_regnum)
{
return f->regs[insn_regnum];
}
static inline unsigned int *fps_regaddr(struct fpustate *f,
unsigned int insn_regnum)
{
return &f->regs[insn_regnum];
}
struct edge_tab {
u16 left, right;
};
static struct edge_tab edge8_tab[8] = {
{ 0xff, 0x80 },
{ 0x7f, 0xc0 },
{ 0x3f, 0xe0 },
{ 0x1f, 0xf0 },
{ 0x0f, 0xf8 },
{ 0x07, 0xfc },
{ 0x03, 0xfe },
{ 0x01, 0xff },
};
static struct edge_tab edge8_tab_l[8] = {
{ 0xff, 0x01 },
{ 0xfe, 0x03 },
{ 0xfc, 0x07 },
{ 0xf8, 0x0f },
{ 0xf0, 0x1f },
{ 0xe0, 0x3f },
{ 0xc0, 0x7f },
{ 0x80, 0xff },
};
static struct edge_tab edge16_tab[4] = {
{ 0xf, 0x8 },
{ 0x7, 0xc },
{ 0x3, 0xe },
{ 0x1, 0xf },
};
static struct edge_tab edge16_tab_l[4] = {
{ 0xf, 0x1 },
{ 0xe, 0x3 },
{ 0xc, 0x7 },
{ 0x8, 0xf },
};
static struct edge_tab edge32_tab[2] = {
{ 0x3, 0x2 },
{ 0x1, 0x3 },
};
static struct edge_tab edge32_tab_l[2] = {
{ 0x3, 0x1 },
{ 0x2, 0x3 },
};
static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val;
u16 left, right;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
orig_rs1 = rs1 = fetch_reg(RS1(insn), regs);
orig_rs2 = rs2 = fetch_reg(RS2(insn), regs);
if (test_thread_flag(TIF_32BIT)) {
rs1 = rs1 & 0xffffffff;
rs2 = rs2 & 0xffffffff;
}
switch (opf) {
default:
case EDGE8_OPF:
case EDGE8N_OPF:
left = edge8_tab[rs1 & 0x7].left;
right = edge8_tab[rs2 & 0x7].right;
break;
case EDGE8L_OPF:
case EDGE8LN_OPF:
left = edge8_tab_l[rs1 & 0x7].left;
right = edge8_tab_l[rs2 & 0x7].right;
break;
case EDGE16_OPF:
case EDGE16N_OPF:
left = edge16_tab[(rs1 >> 1) & 0x3].left;
right = edge16_tab[(rs2 >> 1) & 0x3].right;
break;
case EDGE16L_OPF:
case EDGE16LN_OPF:
left = edge16_tab_l[(rs1 >> 1) & 0x3].left;
right = edge16_tab_l[(rs2 >> 1) & 0x3].right;
break;
case EDGE32_OPF:
case EDGE32N_OPF:
left = edge32_tab[(rs1 >> 2) & 0x1].left;
right = edge32_tab[(rs2 >> 2) & 0x1].right;
break;
case EDGE32L_OPF:
case EDGE32LN_OPF:
left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
break;
}
if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
rd_val = right & left;
else
rd_val = left;
store_reg(regs, rd_val, RD(insn));
switch (opf) {
case EDGE8_OPF:
case EDGE8L_OPF:
case EDGE16_OPF:
case EDGE16L_OPF:
case EDGE32_OPF:
case EDGE32L_OPF: {
unsigned long ccr, tstate;
__asm__ __volatile__("subcc %1, %2, %%g0\n\t"
"rd %%ccr, %0"
: "=r" (ccr)
: "r" (orig_rs1), "r" (orig_rs2)
: "cc");
tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
regs->tstate = tstate | (ccr << 32UL);
}
}
}
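/* Worked example, not part of the original file: for EDGE8 with
 * rs1 == 3 and rs2 == 5, the left mask is edge8_tab[3].left == 0x1f and
 * the right mask is edge8_tab[5].right == 0xfc.  Both addresses sit in
 * the same 8-byte block, so rd receives 0x1f & 0xfc == 0x1c.
 */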
static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
unsigned long rs1, rs2, rd_val;
unsigned int bits, bits_mask;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
rs1 = fetch_reg(RS1(insn), regs);
rs2 = fetch_reg(RS2(insn), regs);
bits = (rs2 > 5 ? 5 : rs2);
bits_mask = (1UL << bits) - 1UL;
rd_val = ((((rs1 >> 11) & 0x3) << 0) |
(((rs1 >> 33) & 0x3) << 2) |
(((rs1 >> 55) & 0x1) << 4) |
(((rs1 >> 13) & 0xf) << 5) |
(((rs1 >> 35) & 0xf) << 9) |
(((rs1 >> 56) & 0xf) << 13) |
(((rs1 >> 17) & bits_mask) << 17) |
(((rs1 >> 39) & bits_mask) << (17 + bits)) |
(((rs1 >> 60) & 0xf) << (17 + (2*bits))));
switch (opf) {
case ARRAY16_OPF:
rd_val <<= 1;
break;
case ARRAY32_OPF:
rd_val <<= 2;
}
store_reg(regs, rd_val, RD(insn));
}
static void bmask(struct pt_regs *regs, unsigned int insn)
{
unsigned long rs1, rs2, rd_val, gsr;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
rs1 = fetch_reg(RS1(insn), regs);
rs2 = fetch_reg(RS2(insn), regs);
rd_val = rs1 + rs2;
store_reg(regs, rd_val, RD(insn));
gsr = current_thread_info()->gsr[0] & 0xffffffff;
gsr |= rd_val << 32UL;
current_thread_info()->gsr[0] = gsr;
}
static void bshuffle(struct pt_regs *regs, unsigned int insn)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val;
unsigned long bmask, i;
bmask = current_thread_info()->gsr[0] >> 32UL;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0UL;
for (i = 0; i < 8; i++) {
unsigned long which = (bmask >> (i * 4)) & 0xf;
unsigned long byte;
if (which < 8)
byte = (rs1 >> (which * 8)) & 0xff;
else
byte = (rs2 >> ((which-8)*8)) & 0xff;
rd_val |= (byte << (i * 8));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
}
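/* Worked example, not part of the original file: numbering bytes from
 * the least significant end, nibble i of the mask selects the source
 * byte for destination byte i, with values 0-7 naming rs1 bytes and
 * 8-15 naming rs2 bytes.  A mask of 0x76543210 therefore copies rs1
 * through unchanged, and 0xfedcba98 copies rs2.
 */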
static void pdist(struct pt_regs *regs, unsigned int insn)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, *rd, rd_val;
unsigned long i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd = fpd_regaddr(f, RD(insn));
rd_val = *rd;
for (i = 0; i < 8; i++) {
s16 s1, s2;
s1 = (rs1 >> (56 - (i * 8))) & 0xff;
s2 = (rs2 >> (56 - (i * 8))) & 0xff;
/* Absolute value of difference. */
s1 -= s2;
if (s1 < 0)
s1 = ~s1 + 1;
rd_val += s1;
}
*rd = rd_val;
}
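/* Worked example, not part of the original file: pdist accumulates the
 * sum of absolute byte differences into rd.  If the top two lanes of
 * rs1 hold 10 and 200 and those of rs2 hold 12 and 190, the two lanes
 * add |10 - 12| + |200 - 190| == 12 to the running total already in rd.
 */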
static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, gsr, scale, rd_val;
gsr = current_thread_info()->gsr[0];
scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f);
switch (opf) {
case FPACK16_OPF: {
unsigned long byte;
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
unsigned int val;
s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL;
int scaled = src << scale;
int from_fixed = scaled >> 7;
val = ((from_fixed < 0) ?
0 :
(from_fixed > 255) ?
255 : from_fixed);
rd_val |= (val << (8 * byte));
}
*fps_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPACK32_OPF: {
unsigned long word;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL);
for (word = 0; word < 2; word++) {
unsigned long val;
s32 src = (rs2 >> (word * 32UL));
s64 scaled = src << scale;
s64 from_fixed = scaled >> 23;
val = ((from_fixed < 0) ?
0 :
(from_fixed > 255) ?
255 : from_fixed);
rd_val |= (val << (32 * word));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPACKFIX_OPF: {
unsigned long word;
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (word = 0; word < 2; word++) {
long val;
s32 src = (rs2 >> (word * 32UL));
s64 scaled = src << scale;
s64 from_fixed = scaled >> 16;
val = ((from_fixed < -32768) ?
-32768 :
(from_fixed > 32767) ?
32767 : from_fixed);
rd_val |= ((val & 0xffff) << (word * 16));
}
*fps_regaddr(f, RD(insn)) = rd_val;
break;
}
case FEXPAND_OPF: {
unsigned long byte;
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
unsigned long val;
u8 src = (rs2 >> (byte * 8)) & 0xff;
val = src << 4;
rd_val |= (val << (byte * 16));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPMERGE_OPF: {
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = (((rs2 & 0x000000ff) << 0) |
((rs1 & 0x000000ff) << 8) |
((rs2 & 0x0000ff00) << 8) |
((rs1 & 0x0000ff00) << 16) |
((rs2 & 0x00ff0000) << 16) |
((rs1 & 0x00ff0000) << 24) |
((rs2 & 0xff000000) << 24) |
((rs1 & 0xff000000) << 32));
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
}
}
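/* Worked example, not part of the original file: for FPACK16 with a
 * GSR scale of 7, the left shift by 7 and the fixed-point right shift
 * by 7 cancel, so each signed 16-bit lane simply saturates into an
 * unsigned byte: 291 clamps to 255 and -5 clamps to 0.
 */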
static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val;
switch (opf) {
case FMUL8x16_OPF: {
unsigned long byte;
rs1 = fps_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
s16 src2 = (rs2 >> (byte * 16)) & 0xffff;
u32 prod = src1 * src2;
u16 scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMUL8x16AU_OPF:
case FMUL8x16AL_OPF: {
unsigned long byte;
s16 src2;
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0);
for (byte = 0; byte < 4; byte++) {
u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
u32 prod = src1 * src2;
u16 scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMUL8SUx16_OPF:
case FMUL8ULx16_OPF: {
unsigned long byte, ushift;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0;
for (byte = 0; byte < 4; byte++) {
u16 src1;
s16 src2;
u32 prod;
u16 scaled;
src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
src2 = ((rs2 >> (16 * byte)) & 0xffff);
prod = src1 * src2;
scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMULD8SUx16_OPF:
case FMULD8ULx16_OPF: {
unsigned long byte, ushift;
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0;
for (byte = 0; byte < 2; byte++) {
u16 src1;
s16 src2;
u32 prod;
u16 scaled;
src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
src2 = ((rs2 >> (16 * byte)) & 0xffff);
prod = src1 * src2;
scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) <<
((byte * 32UL) + 7UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
}
}
static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val, i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
switch (opf) {
case FCMPGT16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a > b)
rd_val |= 8 >> i;
}
break;
case FCMPGT32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a > b)
rd_val |= 2 >> i;
}
break;
case FCMPLE16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a <= b)
rd_val |= 8 >> i;
}
break;
case FCMPLE32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a <= b)
rd_val |= 2 >> i;
}
break;
case FCMPNE16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a != b)
rd_val |= 8 >> i;
}
break;
case FCMPNE32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a != b)
rd_val |= 2 >> i;
}
break;
case FCMPEQ16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a == b)
rd_val |= 8 >> i;
}
break;
case FCMPEQ32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a == b)
rd_val |= 2 >> i;
}
break;
}
maybe_flush_windows(0, 0, RD(insn), 0);
store_reg(regs, rd_val, RD(insn));
}
/* Emulate the VIS instructions which are not implemented in
* hardware on Niagara.
*/
int vis_emul(struct pt_regs *regs, unsigned int insn)
{
unsigned long pc = regs->tpc;
unsigned int opf;
BUG_ON(regs->tstate & TSTATE_PRIV);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc))
return -EFAULT;
save_and_clear_fpu();
opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
switch (opf) {
default:
return -EINVAL;
/* Pixel Formatting Instructions. */
case FPACK16_OPF:
case FPACK32_OPF:
case FPACKFIX_OPF:
case FEXPAND_OPF:
case FPMERGE_OPF:
pformat(regs, insn, opf);
break;
/* Partitioned Multiply Instructions */
case FMUL8x16_OPF:
case FMUL8x16AU_OPF:
case FMUL8x16AL_OPF:
case FMUL8SUx16_OPF:
case FMUL8ULx16_OPF:
case FMULD8SUx16_OPF:
case FMULD8ULx16_OPF:
pmul(regs, insn, opf);
break;
/* Pixel Compare Instructions */
case FCMPGT16_OPF:
case FCMPGT32_OPF:
case FCMPLE16_OPF:
case FCMPLE32_OPF:
case FCMPNE16_OPF:
case FCMPNE32_OPF:
case FCMPEQ16_OPF:
case FCMPEQ32_OPF:
pcmp(regs, insn, opf);
break;
/* Edge Handling Instructions */
case EDGE8_OPF:
case EDGE8N_OPF:
case EDGE8L_OPF:
case EDGE8LN_OPF:
case EDGE16_OPF:
case EDGE16N_OPF:
case EDGE16L_OPF:
case EDGE16LN_OPF:
case EDGE32_OPF:
case EDGE32N_OPF:
case EDGE32L_OPF:
case EDGE32LN_OPF:
edge(regs, insn, opf);
break;
/* Pixel Component Distance */
case PDIST_OPF:
pdist(regs, insn);
break;
/* Three-Dimensional Array Addressing Instructions */
case ARRAY8_OPF:
case ARRAY16_OPF:
case ARRAY32_OPF:
array(regs, insn, opf);
break;
/* Byte Mask and Shuffle Instructions */
case BMASK_OPF:
bmask(regs, insn);
break;
case BSHUFFLE_OPF:
bshuffle(regs, insn);
break;
}
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 0;
}
| linux-master | arch/sparc/kernel/visemul.c |
// SPDX-License-Identifier: GPL-2.0
/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
*
* Copyright (C) 2001, 2002, 2003, 2007, 2008 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/numa.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/pstate.h>
#include <asm/prom.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#define DRIVER_NAME "schizo"
#define PFX DRIVER_NAME ": "
/* This is a convention that at least Excalibur and Merlin
* follow. I suppose the SCHIZO used in Starcat and friends
* will do similar.
*
* The only way I could see this changing is if the newlink
* block requires more space in Schizo's address space than
* they predicted, thus requiring an address space reorg when
* the newer Schizo is taped out.
*/
/* Streaming buffer control register. */
#define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
#define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
#define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
#define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
#define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
/* IOMMU control register. */
#define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
#define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
#define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
#define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
#define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
#define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
#define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
#define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
#define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
#define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
#define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
/* Schizo config space address format is nearly identical to
* that of PSYCHO:
*
* 32 24 23 16 15 11 10 8 7 2 1 0
* ---------------------------------------------------------
* |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
* ---------------------------------------------------------
*/
#define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space)
#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 16) | \
((unsigned long)(DEVFN) << 8) | \
((unsigned long)(REG)))
static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
unsigned char bus,
unsigned int devfn,
int where)
{
if (!pbm)
return NULL;
bus -= pbm->pci_first_busno;
return (void *)
(SCHIZO_CONFIG_BASE(pbm) |
SCHIZO_CONFIG_ENCODE(bus, devfn, where));
}
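/* Worked example, not part of the original file: per the address format
 * above, bus 1, devfn 0x20 (device 4, function 0) and register 0x0c
 * encode as (1 << 16) | (0x20 << 8) | 0x0c == 0x1200c, which is then
 * OR'ed into the PBM's config space base.
 */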
/* SCHIZO error handling support. */
enum schizo_error_type {
UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
};
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
#define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */
#define SCHIZO_CE_INO 0x31 /* Correctable ECC error */
#define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */
#define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */
#define SCHIZO_SERR_INO 0x34 /* Safari interface error */
#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */
#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */
#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */
#define SCHIZO_STCERR_WRITE 0x2UL
#define SCHIZO_STCERR_READ 0x1UL
#define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL
#define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL
#define SCHIZO_STCTAG_VALID 0x8000000000000000UL
#define SCHIZO_STCTAG_READ 0x4000000000000000UL
#define SCHIZO_STCLINE_LINDX 0x0000000007800000UL
#define SCHIZO_STCLINE_SPTR 0x000000000007e000UL
#define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL
#define SCHIZO_STCLINE_EPTR 0x000000000000003fUL
#define SCHIZO_STCLINE_VALID 0x0000000000600000UL
#define SCHIZO_STCLINE_FOFN 0x0000000000180000UL
static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
struct strbuf *strbuf = &pbm->stc;
unsigned long regbase = pbm->pbm_regs;
unsigned long err_base, tag_base, line_base;
u64 control;
int i;
err_base = regbase + SCHIZO_STC_ERR;
tag_base = regbase + SCHIZO_STC_TAG;
line_base = regbase + SCHIZO_STC_LINE;
spin_lock(&stc_buf_lock);
/* This is __REALLY__ dangerous. When we put the
* streaming buffer into diagnostic mode to probe
 * its tags and error status, we _must_ clear all
* of the line tag valid bits before re-enabling
* the streaming buffer. If any dirty data lives
* in the STC when we do this, we will end up
* invalidating it before it has a chance to reach
* main memory.
*/
control = upa_readq(strbuf->strbuf_control);
upa_writeq((control | SCHIZO_STRBUF_CTRL_DENAB),
strbuf->strbuf_control);
for (i = 0; i < 128; i++) {
unsigned long val;
val = upa_readq(err_base + (i * 8UL));
upa_writeq(0UL, err_base + (i * 8UL));
stc_error_buf[i] = val;
}
for (i = 0; i < 16; i++) {
stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
upa_writeq(0UL, tag_base + (i * 8UL));
upa_writeq(0UL, line_base + (i * 8UL));
}
/* OK, state is logged, exit diagnostic mode. */
upa_writeq(control, strbuf->strbuf_control);
for (i = 0; i < 16; i++) {
int j, saw_error, first, last;
saw_error = 0;
first = i * 8;
last = first + 8;
for (j = first; j < last; j++) {
unsigned long errval = stc_error_buf[j];
if (errval != 0) {
saw_error++;
printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
pbm->name,
j,
(errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
(errval & SCHIZO_STCERR_READ) ? 1 : 0);
}
}
if (saw_error != 0) {
unsigned long tagval = stc_tag_buf[i];
unsigned long lineval = stc_line_buf[i];
printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
pbm->name,
i,
((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
(tagval & SCHIZO_STCTAG_VPN),
((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));
/* XXX Should spit out per-bank error information... -DaveM */
printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
"V(%d)FOFN(%d)]\n",
pbm->name,
i,
((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
}
}
spin_unlock(&stc_buf_lock);
}
/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
* controller level errors.
*/
#define SCHIZO_IOMMU_TAG 0xa580UL
#define SCHIZO_IOMMU_DATA 0xa600UL
#define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL
#define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL
#define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL
#define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL
#define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL
#define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL
#define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL
#define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL
#define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL
#define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL
static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
struct iommu *iommu = pbm->iommu;
unsigned long iommu_tag[16];
unsigned long iommu_data[16];
unsigned long flags;
u64 control;
int i;
spin_lock_irqsave(&iommu->lock, flags);
control = upa_readq(iommu->iommu_control);
if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
unsigned long base;
char *type_string;
/* Clear the error encountered bit. */
control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
upa_writeq(control, iommu->iommu_control);
switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
case 0:
type_string = "Protection Error";
break;
case 1:
type_string = "Invalid Error";
break;
case 2:
type_string = "TimeOut Error";
break;
case 3:
default:
type_string = "ECC Error";
break;
}
printk("%s: IOMMU Error, type[%s]\n",
pbm->name, type_string);
/* Put the IOMMU into diagnostic mode and probe
 * its TLB for entries with error status.
*
* It is very possible for another DVMA to occur
* while we do this probe, and corrupt the system
* further. But we are so screwed at this point
* that we are likely to crash hard anyways, so
* get as much diagnostic information to the
* console as we can.
*/
upa_writeq(control | SCHIZO_IOMMU_CTRL_DENAB,
iommu->iommu_control);
base = pbm->pbm_regs;
for (i = 0; i < 16; i++) {
iommu_tag[i] =
upa_readq(base + SCHIZO_IOMMU_TAG + (i * 8UL));
iommu_data[i] =
upa_readq(base + SCHIZO_IOMMU_DATA + (i * 8UL));
/* Now clear out the entry. */
upa_writeq(0, base + SCHIZO_IOMMU_TAG + (i * 8UL));
upa_writeq(0, base + SCHIZO_IOMMU_DATA + (i * 8UL));
}
/* Leave diagnostic mode. */
upa_writeq(control, iommu->iommu_control);
for (i = 0; i < 16; i++) {
unsigned long tag, data;
tag = iommu_tag[i];
if (!(tag & SCHIZO_IOMMU_TAG_ERR))
continue;
data = iommu_data[i];
switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
case 0:
type_string = "Protection Error";
break;
case 1:
type_string = "Invalid Error";
break;
case 2:
type_string = "TimeOut Error";
break;
case 3:
default:
type_string = "ECC Error";
break;
}
printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
"sz(%dK) vpg(%08lx)]\n",
pbm->name, i, type_string,
(int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
(tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
pbm->name, i,
((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
(data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
}
}
if (pbm->stc.strbuf_enabled)
__schizo_check_stc_error_pbm(pbm, type);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void schizo_check_iommu_error(struct pci_pbm_info *pbm,
enum schizo_error_type type)
{
schizo_check_iommu_error_pbm(pbm, type);
if (pbm->sibling)
schizo_check_iommu_error_pbm(pbm->sibling, type);
}
/* Uncorrectable ECC error status gathering. */
#define SCHIZO_UE_AFSR 0x10030UL
#define SCHIZO_UE_AFAR 0x10038UL
#define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */
#define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */
#define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */
#define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */
#define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */
#define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */
#define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */
#define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */
#define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */
static irqreturn_t schizo_ue_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SCHIZO_UE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SCHIZO_UE_AFAR;
unsigned long afsr, afar, error_bits;
int reported, limit;
/* Latch uncorrectable error status. */
afar = upa_readq(afar_reg);
/* If either of the error pending bits are set in the
* AFSR, the error status is being actively updated by
* the hardware and we must re-read to get a clean value.
*/
limit = 1000;
do {
afsr = upa_readq(afsr_reg);
} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
/* Clear the primary/secondary error status bits. */
error_bits = afsr &
(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Uncorrectable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & SCHIZO_UEAFSR_PPIO) ?
"PIO" :
((error_bits & SCHIZO_UEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SCHIZO_UEAFSR_PDWR) ?
"DMA Write" : "???")))));
printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
(afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
(afsr & SCHIZO_UEAFSR_AID) >> 24UL);
printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
(afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
(afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: UE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SCHIZO_UEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SCHIZO_UEAFSR_SDMA) {
reported++;
printk("(DMA)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* Interrogate IOMMU for error status. */
schizo_check_iommu_error(pbm, UE_ERR);
return IRQ_HANDLED;
}
#define SCHIZO_CE_AFSR 0x10040UL
#define SCHIZO_CE_AFAR 0x10048UL
#define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL
#define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL
#define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL
#define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL
#define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL
#define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL
#define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL
#define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL
#define SCHIZO_CEAFSR_AID 0x000000001f000000UL
#define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL
#define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL
#define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL
#define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL
#define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL
static irqreturn_t schizo_ce_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SCHIZO_CE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SCHIZO_CE_AFAR;
unsigned long afsr, afar, error_bits;
int reported, limit;
/* Latch error status. */
afar = upa_readq(afar_reg);
/* If either of the error pending bits are set in the
* AFSR, the error status is being actively updated by
* the hardware and we must re-read to get a clean value.
*/
limit = 1000;
do {
afsr = upa_readq(afsr_reg);
} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Correctable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & SCHIZO_CEAFSR_PPIO) ?
"PIO" :
((error_bits & SCHIZO_CEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SCHIZO_CEAFSR_PDWR) ?
"DMA Write" : "???")))));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
(afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
(afsr & SCHIZO_UEAFSR_AID) >> 24UL);
printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
pbm->name,
(afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
(afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
(afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
(afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
printk("%s: CE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: CE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SCHIZO_CEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SCHIZO_CEAFSR_SDMA) {
reported++;
printk("(DMA)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SCHIZO_PCI_AFSR 0x2010UL
#define SCHIZO_PCI_AFAR 0x2018UL
#define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */
#define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */
#define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */
#define SCHIZO_PCI_CTRL (0x2000UL)
#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */
#define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */
#define SCHIZO_PCICTRL_ARB_PRIO (0x1ffUL << 52UL) /* Tomatillo */
#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */
#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */
#define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */
#define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */
#define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */
#define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */
#define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */
#define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PTO_SHIFT 24UL
#define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */
#define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */
#define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */
#define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */
#define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */
#define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */
#define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */
#define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */
#define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */
#define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */
static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
unsigned long csr_reg, csr, csr_error_bits;
irqreturn_t ret = IRQ_NONE;
u32 stat;
csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
csr = upa_readq(csr_reg);
csr_error_bits =
csr & (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
SCHIZO_PCICTRL_DTO_ERR |
SCHIZO_PCICTRL_SBH_ERR |
SCHIZO_PCICTRL_SERR);
if (csr_error_bits) {
/* Clear the errors. */
upa_writeq(csr, csr_reg);
/* Log 'em. */
if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
printk("%s: Bus unusable error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
printk("%s: PCI TRDY# timeout error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
printk("%s: PCI excessive retry error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
printk("%s: PCI discard timeout error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
printk("%s: PCI streaming byte hole error asserted.\n",
pbm->name);
if (csr_error_bits & SCHIZO_PCICTRL_SERR)
printk("%s: PCI SERR signal asserted.\n",
pbm->name);
ret = IRQ_HANDLED;
}
pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
if (stat & (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_SIG_SYSTEM_ERROR)) {
printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
pbm->name, stat);
pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
ret = IRQ_HANDLED;
}
return ret;
}
static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg, afar_reg, base;
unsigned long afsr, afar, error_bits;
int reported;
base = pbm->pbm_regs;
afsr_reg = base + SCHIZO_PCI_AFSR;
afar_reg = base + SCHIZO_PCI_AFAR;
/* Latch error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
if (!error_bits)
return schizo_pcierr_intr_other(pbm);
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: PCI Error, primary error type[%s]\n",
pbm->name,
(((error_bits & SCHIZO_PCIAFSR_PMA) ?
"Master Abort" :
((error_bits & SCHIZO_PCIAFSR_PTA) ?
"Target Abort" :
((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
"Excessive Retries" :
((error_bits & SCHIZO_PCIAFSR_PPERR) ?
"Parity Error" :
((error_bits & SCHIZO_PCIAFSR_PTTO) ?
"Timeout" :
((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
"Bus Unusable" : "???"))))))));
printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
pbm->name,
(afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
(afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
((afsr & SCHIZO_PCIAFSR_CFG) ?
"Config" :
((afsr & SCHIZO_PCIAFSR_MEM) ?
"Memory" :
((afsr & SCHIZO_PCIAFSR_IO) ?
"I/O" : "???"))));
printk("%s: PCI AFAR [%016lx]\n",
pbm->name, afar);
printk("%s: PCI Secondary errors [",
pbm->name);
reported = 0;
if (afsr & SCHIZO_PCIAFSR_SMA) {
reported++;
printk("(Master Abort)");
}
if (afsr & SCHIZO_PCIAFSR_STA) {
reported++;
printk("(Target Abort)");
}
if (afsr & SCHIZO_PCIAFSR_SRTRY) {
reported++;
printk("(Excessive Retries)");
}
if (afsr & SCHIZO_PCIAFSR_SPERR) {
reported++;
printk("(Parity Error)");
}
if (afsr & SCHIZO_PCIAFSR_STTO) {
reported++;
printk("(Timeout)");
}
if (afsr & SCHIZO_PCIAFSR_SUNUS) {
reported++;
printk("(Bus Unusable)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* For the error types shown, scan PBM's PCI bus for devices
* which have logged that error type.
*/
/* If we see a Target Abort, this could be the result of an
* IOMMU translation error of some sort. It is extremely
* useful to log this information as usually it indicates
* a bug in the IOMMU support code or a PCI device driver.
*/
if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
schizo_check_iommu_error(pbm, PCI_ERR);
pci_scan_for_target_abort(pbm, pbm->pci_bus);
}
if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
pci_scan_for_master_abort(pbm, pbm->pci_bus);
/* For excessive retries, PSYCHO/PBM will abort the device
* and there is no way to specifically check for excessive
* retries in the config space status registers. So what
* we hope is that we'll catch it via the master/target
* abort events.
*/
if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
pci_scan_for_parity_error(pbm, pbm->pci_bus);
return IRQ_HANDLED;
}
#define SCHIZO_SAFARI_ERRLOG 0x10018UL
#define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL
#define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */
#define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */
#define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */
#define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */
#define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */
#define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */
#define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */
#define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */
#define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */
#define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */
#define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */
#define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */
#define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */
#define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */
#define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */
#define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */
#define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */
#define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */
#define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */
#define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */
#define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */
#define BUS_ERROR_UNMAP 0x0000000000000010UL /* Safari/Tomatillo */
#define BUS_ERROR_BUSERR 0x0000000000000004UL /* Safari/Tomatillo */
#define BUS_ERROR_TIMEOUT 0x0000000000000002UL /* Safari/Tomatillo */
#define BUS_ERROR_ILL 0x0000000000000001UL /* Safari */
/* We only expect UNMAP errors here. The rest of the Safari errors
* are marked fatal and thus cause a system reset.
*/
static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
u64 errlog;
errlog = upa_readq(pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
upa_writeq(errlog & ~(SAFARI_ERRLOG_ERROUT),
pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
if (!(errlog & BUS_ERROR_UNMAP)) {
printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016llx]\n",
pbm->name, errlog);
return IRQ_HANDLED;
}
printk("%s: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
pbm->name);
schizo_check_iommu_error(pbm, SAFARI_ERR);
return IRQ_HANDLED;
}
/* Nearly identical to PSYCHO equivalents... */
#define SCHIZO_ECC_CTRL 0x10020UL
#define SCHIZO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
#define SCHIZO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
#define SCHIZO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE Interrupts */
#define SCHIZO_SAFARI_ERRCTRL 0x10008UL
#define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL
#define SCHIZO_SAFARI_IRQCTRL 0x10010UL
#define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL
static int pbm_routes_this_ino(struct pci_pbm_info *pbm, u32 ino)
{
ino &= IMAP_INO;
if (pbm->ino_bitmap & (1UL << ino))
return 1;
return 0;
}
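/* Worked example, not part of the original file: an ino_bitmap of
 * 0x0000000c00000000UL sets only bits 34 and 35, so just INOs 0x22 and
 * 0x23 are considered routed to this PBM by the test above.
 */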
/* How the Tomatillo IRQs are routed around is pure guesswork here.
*
* All the Tomatillo devices I see in prtconf dumps seem to have only
 * a single PCI bus unit attached to them. They would seem to be separate
 * devices because their PortID (i.e. JBUS ID) values are all different
 * and thus the registers are mapped to totally different locations.
 *
 * However, two Tomatillos look "similar" in that the only difference
 * in their PortID is the lowest bit.
*
* So if we were to ignore this lower bit, it certainly looks like two
* PCI bus units of the same Tomatillo. I still have not really
* figured this out...
*/
static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm)
{
struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node);
u64 tmp, err_mask, err_no_mask;
int err;
/* Tomatillo IRQ property layout is:
* 0: PCIERR
* 1: UE ERR
* 2: CE ERR
* 3: SERR
* 4: POWER FAIL?
*/
if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) {
err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0,
"TOMATILLO_UE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register UE, "
"err=%d\n", pbm->name, err);
}
if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) {
err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0,
"TOMATILLO_CE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register CE, "
"err=%d\n", pbm->name, err);
}
err = 0;
if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"TOMATILLO_PCIERR", pbm);
} else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"TOMATILLO_PCIERR", pbm);
}
if (err)
printk(KERN_WARNING "%s: Could not register PCIERR, "
"err=%d\n", pbm->name, err);
if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) {
err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0,
"TOMATILLO_SERR", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register SERR, "
"err=%d\n", pbm->name, err);
}
/* Enable UE and CE interrupts for controller. */
upa_writeq((SCHIZO_ECCCTRL_EE |
SCHIZO_ECCCTRL_UE |
SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL);
/* Enable PCI Error interrupts and clear error
* bits.
*/
err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
SCHIZO_PCICTRL_SERR |
SCHIZO_PCICTRL_EEN);
err_no_mask = SCHIZO_PCICTRL_DTO_ERR;
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
tmp |= err_mask;
tmp &= ~err_no_mask;
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);
err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
SCHIZO_PCIAFSR_PTTO |
SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
SCHIZO_PCIAFSR_STTO);
upa_writeq(err_mask, pbm->pbm_regs + SCHIZO_PCI_AFSR);
err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
BUS_ERROR_APERR | BUS_ERROR_UNMAP |
BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);
upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask),
pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL);
upa_writeq((SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)),
pbm->controller_regs + SCHIZO_SAFARI_IRQCTRL);
}
static void schizo_register_error_handlers(struct pci_pbm_info *pbm)
{
struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node);
u64 tmp, err_mask, err_no_mask;
int err;
/* Schizo IRQ property layout is:
* 0: PCIERR
* 1: UE ERR
* 2: CE ERR
* 3: SERR
* 4: POWER FAIL?
*/
if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) {
err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0,
"SCHIZO_UE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register UE, "
"err=%d\n", pbm->name, err);
}
if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) {
err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0,
"SCHIZO_CE", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register CE, "
"err=%d\n", pbm->name, err);
}
err = 0;
if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"SCHIZO_PCIERR", pbm);
} else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) {
err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0,
"SCHIZO_PCIERR", pbm);
}
if (err)
printk(KERN_WARNING "%s: Could not register PCIERR, "
"err=%d\n", pbm->name, err);
if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) {
err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0,
"SCHIZO_SERR", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register SERR, "
"err=%d\n", pbm->name, err);
}
/* Enable UE and CE interrupts for controller. */
upa_writeq((SCHIZO_ECCCTRL_EE |
SCHIZO_ECCCTRL_UE |
SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL);
err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_ESLCK |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
SCHIZO_PCICTRL_SBH_ERR |
SCHIZO_PCICTRL_SERR |
SCHIZO_PCICTRL_EEN);
err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
SCHIZO_PCICTRL_SBH_INT);
/* Enable PCI Error interrupts and clear error
* bits for each PBM.
*/
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
tmp |= err_mask;
tmp &= ~err_no_mask;
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);
upa_writeq((SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS),
pbm->pbm_regs + SCHIZO_PCI_AFSR);
	/* Make all Safari error conditions fatal except unmapped
	 * errors, which we configure to generate interrupts instead.
	 */
err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
BUS_ERROR_BADMA | BUS_ERROR_BADMB |
BUS_ERROR_BADMC |
BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
BUS_ERROR_CIQTO |
BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
BUS_ERROR_ILL);
#if 1
/* XXX Something wrong with some Excalibur systems
* XXX Sun is shipping. The behavior on a 2-cpu
* XXX machine is that both CPU1 parity error bits
* XXX are set and are immediately set again when
* XXX their error status bits are cleared. Just
* XXX ignore them for now. -DaveM
*/
err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
#endif
upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask),
pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL);
}
static void pbm_config_busmastering(struct pci_pbm_info *pbm)
{
u8 *addr;
/* Set cache-line size to 64 bytes, this is actually
* a nop but I do it for completeness.
*/
addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_CACHE_LINE_SIZE);
pci_config_write8(addr, 64 / sizeof(u32));
/* Set PBM latency timer to 64 PCI clocks. */
addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_LATENCY_TIMER);
pci_config_write8(addr, 64);
}
static void schizo_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
pbm_config_busmastering(pbm);
pbm->is_66mhz_capable =
(of_find_property(pbm->op->dev.of_node, "66mhz-capable", NULL)
!= NULL);
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
tomatillo_register_error_handlers(pbm);
else
schizo_register_error_handlers(pbm);
}
#define SCHIZO_STRBUF_CONTROL (0x02800UL)
#define SCHIZO_STRBUF_FLUSH (0x02808UL)
#define SCHIZO_STRBUF_FSYNC (0x02810UL)
#define SCHIZO_STRBUF_CTXFLUSH (0x02818UL)
#define SCHIZO_STRBUF_CTXMATCH (0x10000UL)
static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
{
unsigned long base = pbm->pbm_regs;
u64 control;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
/* TOMATILLO lacks streaming cache. */
return;
}
/* SCHIZO has context flushing. */
pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL;
pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH;
pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC;
pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH;
pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH;
pbm->stc.strbuf_flushflag = (volatile unsigned long *)
((((unsigned long)&pbm->stc.__flushflag_buf[0])
+ 63UL)
& ~63UL);
pbm->stc.strbuf_flushflag_pa = (unsigned long)
__pa(pbm->stc.strbuf_flushflag);
/* Turn off LRU locking and diag mode, enable the
* streaming buffer and leave the rerun-disable
* setting however OBP set it.
*/
control = upa_readq(pbm->stc.strbuf_control);
control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
SCHIZO_STRBUF_CTRL_LENAB |
SCHIZO_STRBUF_CTRL_DENAB);
control |= SCHIZO_STRBUF_CTRL_ENAB;
upa_writeq(control, pbm->stc.strbuf_control);
pbm->stc.strbuf_enabled = 1;
}
#define SCHIZO_IOMMU_CONTROL (0x00200UL)
#define SCHIZO_IOMMU_TSBBASE (0x00208UL)
#define SCHIZO_IOMMU_FLUSH (0x00210UL)
#define SCHIZO_IOMMU_CTXFLUSH (0x00218UL)
static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0xc0000000, 0x40000000 };
unsigned long i, tagbase, database;
struct iommu *iommu = pbm->iommu;
int tsbsize, err;
const u32 *vdma;
u32 dma_mask;
u64 control;
vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
if (!vdma)
vdma = vdma_default;
dma_mask = vdma[0];
switch (vdma[1]) {
case 0x20000000:
dma_mask |= 0x1fffffff;
tsbsize = 64;
break;
case 0x40000000:
dma_mask |= 0x3fffffff;
tsbsize = 128;
break;
case 0x80000000:
dma_mask |= 0x7fffffff;
tsbsize = 128;
break;
default:
printk(KERN_ERR PFX "Strange virtual-dma size.\n");
return -EINVAL;
}
/* Register addresses, SCHIZO has iommu ctx flushing. */
iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
/* We use the main control/status register of SCHIZO as the write
* completion register.
*/
iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;
/*
* Invalidate TLB Entries.
*/
control = upa_readq(iommu->iommu_control);
control |= SCHIZO_IOMMU_CTRL_DENAB;
upa_writeq(control, iommu->iommu_control);
tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;
for (i = 0; i < 16; i++) {
upa_writeq(0, pbm->pbm_regs + tagbase + (i * 8UL));
upa_writeq(0, pbm->pbm_regs + database + (i * 8UL));
}
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
pbm->numa_node);
if (err) {
printk(KERN_ERR PFX "iommu_table_init() fails with %d\n", err);
return err;
}
upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
control = upa_readq(iommu->iommu_control);
control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
switch (tsbsize) {
case 64:
control |= SCHIZO_IOMMU_TSBSZ_64K;
break;
case 128:
control |= SCHIZO_IOMMU_TSBSZ_128K;
break;
}
control |= SCHIZO_IOMMU_CTRL_ENAB;
upa_writeq(control, iommu->iommu_control);
return 0;
}
#define SCHIZO_PCI_IRQ_RETRY (0x1a00UL)
#define SCHIZO_IRQ_RETRY_INF 0xffUL
#define SCHIZO_PCI_DIAG (0x2020UL)
#define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */
#define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */
#define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */
#define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */
#define TOMATILLO_PCI_IOC_CSR (0x2248UL)
#define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL
#define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL
#define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL
#define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL
#define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL
#define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL
#define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL
#define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL
#define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL
#define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL
#define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL
#define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL
#define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL
#define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL
#define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL
#define TOMATILLO_PCI_IOC_TDIAG (0x2250UL)
#define TOMATILLO_PCI_IOC_DDIAG (0x2290UL)
static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
{
u64 tmp;
upa_writeq(5, pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY);
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL);
/* Enable arbiter for all PCI slots. */
tmp |= 0xff;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
pbm->chip_version >= 0x2)
tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
if (!of_property_read_bool(pbm->op->dev.of_node, "no-bus-parking"))
tmp |= SCHIZO_PCICTRL_PARK;
else
tmp &= ~SCHIZO_PCICTRL_PARK;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
pbm->chip_version <= 0x1)
tmp |= SCHIZO_PCICTRL_DTO_INT;
else
tmp &= ~SCHIZO_PCICTRL_DTO_INT;
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
tmp |= (SCHIZO_PCICTRL_MRM_PREF |
SCHIZO_PCICTRL_RDO_PREF |
SCHIZO_PCICTRL_RDL_PREF);
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL);
tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_DIAG);
tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
SCHIZO_PCIDIAG_D_RETRY |
SCHIZO_PCIDIAG_D_INTSYNC);
upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_DIAG);
if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
/* Clear prefetch lengths to work around a bug in
* Jalapeno...
*/
tmp = (TOMATILLO_IOC_PART_WPENAB |
(1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
TOMATILLO_IOC_RDMULT_CPENAB |
TOMATILLO_IOC_RDONE_CPENAB |
TOMATILLO_IOC_RDLINE_CPENAB);
upa_writeq(tmp, pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR);
}
}
static int schizo_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, u32 portid,
int chip_type)
{
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
const char *chipset_name;
int err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
chipset_name = "TOMATILLO";
break;
case PBM_CHIP_TYPE_SCHIZO_PLUS:
chipset_name = "SCHIZO+";
break;
case PBM_CHIP_TYPE_SCHIZO:
default:
chipset_name = "SCHIZO";
break;
}
/* For SCHIZO, three OBP regs:
* 1) PBM controller regs
* 2) Schizo front-end controller regs (same for both PBMs)
* 3) PBM PCI config space
*
* For TOMATILLO, four OBP regs:
* 1) PBM controller regs
* 2) Tomatillo front-end controller regs
* 3) PBM PCI config space
* 4) Ichip regs
*/
regs = of_get_property(dp, "reg", NULL);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 8;
pbm->index = pci_num_pbms++;
pbm->portid = portid;
pbm->op = op;
pbm->chip_type = chip_type;
pbm->chip_version = of_getintprop_default(dp, "version#", 0);
pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0);
pbm->pbm_regs = regs[0].phys_addr;
pbm->controller_regs = regs[1].phys_addr - 0x10000UL;
if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
pbm->sync_reg = regs[3].phys_addr + 0x1a18UL;
pbm->name = dp->full_name;
printk("%s: %s PCI Bus Module ver[%x:%x]\n",
pbm->name, chipset_name,
pbm->chip_version, pbm->chip_revision);
schizo_pbm_hw_init(pbm);
pci_determine_mem_io_space(pbm);
pci_get_pbm_props(pbm);
err = schizo_pbm_iommu_init(pbm);
if (err)
return err;
schizo_pbm_strbuf_init(pbm);
schizo_scan_bus(pbm, &op->dev);
return 0;
}
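/* On Tomatillo the two PBMs of a pair have portids that differ only
* in bit 0, so the sibling is matched at portid ^ 1. Schizo PBM
* siblings share the same portid.
*/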
static inline int portid_compare(u32 x, u32 y, int chip_type)
{
if (chip_type == PBM_CHIP_TYPE_TOMATILLO) {
if (x == (y ^ 1))
return 1;
return 0;
}
return (x == y);
}
static struct pci_pbm_info *schizo_find_sibling(u32 portid, int chip_type)
{
struct pci_pbm_info *pbm;
for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
if (portid_compare(pbm->portid, portid, chip_type))
return pbm;
}
return NULL;
}
static int __schizo_init(struct platform_device *op, unsigned long chip_type)
{
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
u32 portid;
int err;
portid = of_getintprop_default(dp, "portid", 0xff);
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
goto out_err;
}
pbm->sibling = schizo_find_sibling(portid, chip_type);
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM A iommu.\n");
goto out_free_pbm;
}
pbm->iommu = iommu;
if (schizo_pbm_init(pbm, op, portid, chip_type))
goto out_free_iommu;
if (pbm->sibling)
pbm->sibling->sibling = pbm;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_pbm:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id schizo_match[];
static int schizo_probe(struct platform_device *op)
{
const struct of_device_id *match;
match = of_match_device(schizo_match, &op->dev);
if (!match)
return -EINVAL;
return __schizo_init(op, (unsigned long)match->data);
}
/* The ordering of this table is very important. Some Tomatillo
* nodes announce that they are compatible with both pci108e,a801
* and pci108e,8001. So list the chips in reverse chronological
* order.
*/
static const struct of_device_id schizo_match[] = {
{
.name = "pci",
.compatible = "pci108e,a801",
.data = (void *) PBM_CHIP_TYPE_TOMATILLO,
},
{
.name = "pci",
.compatible = "pci108e,8002",
.data = (void *) PBM_CHIP_TYPE_SCHIZO_PLUS,
},
{
.name = "pci",
.compatible = "pci108e,8001",
.data = (void *) PBM_CHIP_TYPE_SCHIZO,
},
{},
};
static struct platform_driver schizo_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = schizo_match,
},
.probe = schizo_probe,
};
static int __init schizo_init(void)
{
return platform_driver_register(&schizo_driver);
}
subsys_initcall(schizo_init);
| linux-master | arch/sparc/kernel/pci_schizo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* leon_pci_grpci2.c: GRPCI2 Host PCI driver
*
* Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
*
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/leon.h>
#include <asm/vaddrs.h>
#include <asm/sections.h>
#include <asm/leon_pci.h>
#include "irq.h"
struct grpci2_barcfg {
unsigned long pciadr; /* PCI Space Address */
unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */
};
/* Device Node Configuration options:
* - barcfg : Custom Configuration of Host's 6 target BARs
* - irq_mask : Limit which PCI interrupts are enabled
* - reset : Force PCI Reset on startup
*
* barcfg
* ======
*
* Optional custom Target BAR configuration (see struct grpci2_barcfg). All
* addresses are physical. Array always contains 6 elements (len=2*4*6 bytes)
*
* -1 means not configured (let host driver do default setup).
*
* [i*2+0] = PCI Address of BAR[i] on target interface
* [i*2+1] = Accessing the PCI address of BAR[i] results in this AMBA address
*
*
* irq_mask
* ========
*
* Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default
* all are enabled. Use this when PCI interrupt pins are floating on PCB.
* int, len=4.
* bit0 = PCI INTA#
* bit1 = PCI INTB#
* bit2 = PCI INTC#
* bit3 = PCI INTD#
*
*
* reset
* =====
*
* Force PCI reset on startup. int, len=4
*/
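/* Illustrative sketch only (not taken from any shipping device tree;
* the node name and values below are assumptions, only the property
* names match what this driver reads):
*
* pci@ffa00000 {
* barcfg = <0x40000000 0x40000000 // BAR0: PCI adr -> AHB adr
* 0xffffffff 0xffffffff // BAR1 not configured
* ... >; // BAR2..5 likewise, 6 pairs total
* irq_mask = <0xf>; // enable INTA#..INTD#
* reset = <1>; // force PCI reset at startup
* };
*/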
/* Enable Debugging Configuration Space Access */
#undef GRPCI2_DEBUG_CFGACCESS
/*
* GRPCI2 APB Register MAP
*/
struct grpci2_regs {
unsigned int ctrl; /* 0x00 Control */
unsigned int sts_cap; /* 0x04 Status / Capabilities */
int res1; /* 0x08 */
unsigned int io_map; /* 0x0C I/O Map address */
unsigned int dma_ctrl; /* 0x10 DMA */
unsigned int dma_bdbase; /* 0x14 DMA */
int res2[2]; /* 0x18 */
unsigned int bars[6]; /* 0x20 read-only PCI BARs */
int res3[2]; /* 0x38 */
unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */
/* PCI Trace Buffer Registers (OPTIONAL) */
unsigned int t_ctrl; /* 0x80 */
unsigned int t_cnt; /* 0x84 */
unsigned int t_adpat; /* 0x88 */
unsigned int t_admask; /* 0x8C */
unsigned int t_sigpat; /* 0x90 */
unsigned int t_sigmask; /* 0x94 */
unsigned int t_adstate; /* 0x98 */
unsigned int t_sigstate; /* 0x9C */
};
#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
#define CTRL_BUS_BIT 16
#define CTRL_RESET (1<<31)
#define CTRL_SI (1<<27)
#define CTRL_PE (1<<26)
#define CTRL_EI (1<<25)
#define CTRL_ER (1<<24)
#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
#define CTRL_HOSTINT 0xf
#define STS_HOST_BIT 31
#define STS_MST_BIT 30
#define STS_TAR_BIT 29
#define STS_DMA_BIT 28
#define STS_DI_BIT 27
#define STS_HI_BIT 26
#define STS_IRQMODE_BIT 24
#define STS_TRACE_BIT 23
#define STS_CFGERRVALID_BIT 20
#define STS_CFGERR_BIT 19
#define STS_INTTYPE_BIT 12
#define STS_INTSTS_BIT 8
#define STS_FDEPTH_BIT 2
#define STS_FNUM_BIT 0
#define STS_HOST (1<<STS_HOST_BIT)
#define STS_MST (1<<STS_MST_BIT)
#define STS_TAR (1<<STS_TAR_BIT)
#define STS_DMA (1<<STS_DMA_BIT)
#define STS_DI (1<<STS_DI_BIT)
#define STS_HI (1<<STS_HI_BIT)
#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT)
#define STS_TRACE (1<<STS_TRACE_BIT)
#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT)
#define STS_CFGERR (1<<STS_CFGERR_BIT)
#define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT)
#define STS_INTSTS (0xf<<STS_INTSTS_BIT)
#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT)
#define STS_FNUM (0x3<<STS_FNUM_BIT)
#define STS_ISYSERR (1<<17)
#define STS_IDMA (1<<16)
#define STS_IDMAERR (1<<15)
#define STS_IMSTABRT (1<<14)
#define STS_ITGTABRT (1<<13)
#define STS_IPARERR (1<<12)
#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR)
struct grpci2_bd_chan {
unsigned int ctrl; /* 0x00 DMA Control */
unsigned int nchan; /* 0x04 Next DMA Channel Address */
unsigned int nbd; /* 0x08 Next Data Descriptor in chan */
unsigned int res; /* 0x0C Reserved */
};
#define BD_CHAN_EN 0x80000000
#define BD_CHAN_TYPE 0x00300000
#define BD_CHAN_BDCNT 0x0000ffff
#define BD_CHAN_EN_BIT 31
#define BD_CHAN_TYPE_BIT 20
#define BD_CHAN_BDCNT_BIT 0
struct grpci2_bd_data {
unsigned int ctrl; /* 0x00 DMA Data Control */
unsigned int pci_adr; /* 0x04 PCI Start Address */
unsigned int ahb_adr; /* 0x08 AHB Start address */
unsigned int next; /* 0x0C Next Data Descriptor in chan */
};
#define BD_DATA_EN 0x80000000
#define BD_DATA_IE 0x40000000
#define BD_DATA_DR 0x20000000
#define BD_DATA_TYPE 0x00300000
#define BD_DATA_ER 0x00080000
#define BD_DATA_LEN 0x0000ffff
#define BD_DATA_EN_BIT 31
#define BD_DATA_IE_BIT 30
#define BD_DATA_DR_BIT 29
#define BD_DATA_TYPE_BIT 20
#define BD_DATA_ER_BIT 19
#define BD_DATA_LEN_BIT 0
/* GRPCI2 Capability */
struct grpci2_cap_first {
unsigned int ctrl;
unsigned int pci2ahb_map[6];
unsigned int ext2ahb_map;
unsigned int io_map;
unsigned int pcibar_size[6];
};
#define CAP9_CTRL_OFS 0
#define CAP9_BAR_OFS 0x4
#define CAP9_IOMAP_OFS 0x20
#define CAP9_BARSIZE_OFS 0x24
#define TGT 256
struct grpci2_priv {
struct leon_pci_info info; /* must be on top of this structure */
struct grpci2_regs __iomem *regs;
char irq;
char irq_mode; /* IRQ Mode from CAPSTS REG */
char bt_enabled;
char do_reset;
char irq_mask;
u32 pciid; /* PCI ID of Host */
unsigned char irq_map[4];
/* Virtual IRQ numbers */
unsigned int virq_err;
unsigned int virq_dma;
/* AHB PCI Windows */
unsigned long pci_area; /* MEMORY */
unsigned long pci_area_end;
unsigned long pci_io; /* I/O */
unsigned long pci_conf; /* CONFIGURATION */
unsigned long pci_conf_end;
unsigned long pci_io_va;
struct grpci2_barcfg tgtbars[6];
};
static DEFINE_SPINLOCK(grpci2_dev_lock);
static struct grpci2_priv *grpci2priv;
static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct grpci2_priv *priv = dev->bus->sysdata;
int irq_group;
/* Use default IRQ decoding on PCI BUS0 according to slot numbering */
irq_group = slot & 0x3;
pin = ((pin - 1) + irq_group) & 0x3;
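/* Worked example: slot 2, INTB# (pin=2) gives irq_group=2 and a
* rotated index of ((2-1)+2)&3 = 3, so priv->irq_map[3] is returned.
*/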
return priv->irq_map[pin];
}
static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
unsigned int *pci_conf;
unsigned long flags;
u32 tmp;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
spin_lock_irqsave(&grpci2_dev_lock, flags);
REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
(bus << 16));
spin_unlock_irqrestore(&grpci2_dev_lock, flags);
/* clear old status */
REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
pci_conf = (unsigned int *) (priv->pci_conf |
(devfn << 8) | (where & 0xfc));
tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
/* Wait until GRPCI2 signals that CFG access is done, it should be
* done instantaneously unless a DMA operation is ongoing...
*/
while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
;
if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) {
*val = 0xffffffff;
} else {
/* Bus always little endian (unaffected by byte-swapping) */
*val = swab32(tmp);
}
return 0;
}
static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
if (where & 0x1)
return -EINVAL;
ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xffff & (v >> (8 * (where & 0x3)));
return ret;
}
static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xff & (v >> (8 * (where & 3)));
return ret;
}
static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
unsigned int *pci_conf;
unsigned long flags;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
spin_lock_irqsave(&grpci2_dev_lock, flags);
REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
(bus << 16));
spin_unlock_irqrestore(&grpci2_dev_lock, flags);
/* clear old status */
REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
pci_conf = (unsigned int *) (priv->pci_conf |
(devfn << 8) | (where & 0xfc));
LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
/* Wait until GRPCI2 signals that CFG access is done, it should be
* done instantaneously unless a DMA operation is ongoing...
*/
while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
;
return 0;
}
static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
if (where & 0x1)
return -EINVAL;
ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v);
if (ret)
return ret;
v = (v & ~(0xffff << (8 * (where & 0x3)))) |
((0xffff & val) << (8 * (where & 0x3)));
return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
if (ret != 0)
return ret;
v = (v & ~(0xff << (8 * (where & 0x3)))) |
((0xff & val) << (8 * (where & 0x3)));
return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
/* Read from Configuration Space. When entering here the PCI layer has taken
* the pci_lock spinlock and IRQ is off.
*/
static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct grpci2_priv *priv = grpci2priv;
unsigned int busno = bus->number;
int ret;
if (PCI_SLOT(devfn) > 15 || busno > 255) {
*val = ~0;
return 0;
}
switch (size) {
case 1:
ret = grpci2_cfg_r8(priv, busno, devfn, where, val);
break;
case 2:
ret = grpci2_cfg_r16(priv, busno, devfn, where, val);
break;
case 4:
ret = grpci2_cfg_r32(priv, busno, devfn, where, val);
break;
default:
ret = -EINVAL;
break;
}
#ifdef GRPCI2_DEBUG_CFGACCESS
printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x "
"size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where,
*val, size);
#endif
return ret;
}
/* Write to Configuration Space. When entering here the PCI layer has taken
* the pci_lock spinlock and IRQ is off.
*/
static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct grpci2_priv *priv = grpci2priv;
unsigned int busno = bus->number;
if (PCI_SLOT(devfn) > 15 || busno > 255)
return 0;
#ifdef GRPCI2_DEBUG_CFGACCESS
printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d "
"val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn),
where, size, val);
#endif
switch (size) {
default:
return -EINVAL;
case 1:
return grpci2_cfg_w8(priv, busno, devfn, where, val);
case 2:
return grpci2_cfg_w16(priv, busno, devfn, where, val);
case 4:
return grpci2_cfg_w32(priv, busno, devfn, where, val);
}
}
static struct pci_ops grpci2_ops = {
.read = grpci2_read_config,
.write = grpci2_write_config,
};
/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration
* 3, where all PCI interrupts have separate IRQs on the system IRQ
* controller, this is not needed and the standard IRQ controller can be used.
*/
static void grpci2_mask_irq(struct irq_data *data)
{
unsigned long flags;
unsigned int irqidx;
struct grpci2_priv *priv = grpci2priv;
irqidx = (unsigned int)data->chip_data - 1;
if (irqidx > 3) /* only mask PCI interrupts here */
return;
spin_lock_irqsave(&grpci2_dev_lock, flags);
REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx));
spin_unlock_irqrestore(&grpci2_dev_lock, flags);
}
static void grpci2_unmask_irq(struct irq_data *data)
{
unsigned long flags;
unsigned int irqidx;
struct grpci2_priv *priv = grpci2priv;
irqidx = (unsigned int)data->chip_data - 1;
if (irqidx > 3) /* only unmask PCI interrupts here */
return;
spin_lock_irqsave(&grpci2_dev_lock, flags);
REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx));
spin_unlock_irqrestore(&grpci2_dev_lock, flags);
}
static unsigned int grpci2_startup_irq(struct irq_data *data)
{
grpci2_unmask_irq(data);
return 0;
}
static void grpci2_shutdown_irq(struct irq_data *data)
{
grpci2_mask_irq(data);
}
static struct irq_chip grpci2_irq = {
.name = "grpci2",
.irq_startup = grpci2_startup_irq,
.irq_shutdown = grpci2_shutdown_irq,
.irq_mask = grpci2_mask_irq,
.irq_unmask = grpci2_unmask_irq,
};
/* Handle one or multiple IRQs from the PCI core */
static void grpci2_pci_flow_irq(struct irq_desc *desc)
{
struct grpci2_priv *priv = grpci2priv;
int i, ack = 0;
unsigned int ctrl, sts_cap, pci_ints;
ctrl = REGLOAD(priv->regs->ctrl);
sts_cap = REGLOAD(priv->regs->sts_cap);
/* Error Interrupt? */
if (sts_cap & STS_ERR_IRQ) {
generic_handle_irq(priv->virq_err);
ack = 1;
}
/* PCI Interrupt? */
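/* The INTSTS bits in sts_cap are active low, so invert the status
* before masking against the per-interrupt enable bits in ctrl.
*/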
pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT;
if (pci_ints) {
/* Call respective PCI Interrupt handler */
for (i = 0; i < 4; i++) {
if (pci_ints & (1 << i))
generic_handle_irq(priv->irq_map[i]);
}
ack = 1;
}
/*
* Decode the DMA interrupt only when it is shared with Err and PCI INTX#;
* when the DMA has a unique IRQ its interrupts don't end up here, they
* go directly to the DMA ISR.
*/
if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) {
generic_handle_irq(priv->virq_dma);
ack = 1;
}
/*
* Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ
* Controller, this must be done after IRQ sources have been handled to
* avoid double IRQ generation
*/
if (ack)
desc->irq_data.chip->irq_eoi(&desc->irq_data);
}
/* Create a virtual IRQ */
static unsigned int grpci2_build_device_irq(unsigned int irq)
{
unsigned int virq = 0, pil;
pil = 1 << 8;
virq = irq_alloc(irq, pil);
if (virq == 0)
goto out;
irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq,
"pcilvl");
irq_set_chip_data(virq, (void *)irq);
out:
return virq;
}
static void grpci2_hw_init(struct grpci2_priv *priv)
{
u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
struct grpci2_regs __iomem *regs = priv->regs;
int i;
struct grpci2_barcfg *barcfg = priv->tgtbars;
/* Reset any earlier setup */
if (priv->do_reset) {
printk(KERN_INFO "GRPCI2: Resetting PCI bus\n");
REGSTORE(regs->ctrl, CTRL_RESET);
ssleep(1); /* Wait for boards to settle */
}
REGSTORE(regs->ctrl, 0);
REGSTORE(regs->sts_cap, ~0); /* Clear Status */
REGSTORE(regs->dma_ctrl, 0);
REGSTORE(regs->dma_bdbase, 0);
/* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff);
/* Set a 1:1 mapping between AHB -> PCI memory space for all masters.
* Each AHB master has its own mapping registers. Max 16 AHB masters.
*/
for (i = 0; i < 16; i++)
REGSTORE(regs->ahbmst_map[i], priv->pci_area);
/* Get the GRPCI2 Host PCI ID */
grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid);
/* Get address to first (always defined) capability structure */
grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr);
/* Enable/Disable Byte twisting */
grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map);
io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map);
/* Setup the Host's PCI Target BARs for other peripherals to access,
* and do DMA to the host's memory. The target BARs can be sized and
* enabled individually.
*
* User may set custom target BARs, but default is:
* The first BAR is used to map kernel low (DMA is part of normal
* region on sparc which is SRMMU_MAXMEM big) main memory 1:1 to the
* PCI bus, the other BARs are disabled. We assume that the first BAR
* is always available.
*/
for (i = 0; i < 6; i++) {
if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) {
/* Target BARs must have the proper alignment */
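/* bar_sz isolates the lowest set bit of pciadr, i.e. the BAR size
* equals the alignment of the PCI base address.
*/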
ahbadr = barcfg[i].ahbadr;
pciadr = barcfg[i].pciadr;
bar_sz = ((pciadr - 1) & ~pciadr) + 1;
} else {
if (i == 0) {
/* Map main memory */
bar_sz = 0xf0000008; /* 256MB prefetchable */
ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN(
(unsigned long) &_end));
pciadr = ahbadr;
} else {
bar_sz = 0;
ahbadr = 0;
pciadr = 0;
}
}
grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
bar_sz);
grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
i, pciadr, ahbadr);
}
/* set as bus master and enable pci memory responses */
grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
/* Enable Error response (CPU-TRAP) on illegal memory access. */
REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
}
static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
{
printk(KERN_ERR "GRPCI2: Jump IRQ happened\n");
return IRQ_NONE;
}
/* Handle GRPCI2 Error Interrupt */
static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
{
struct grpci2_priv *priv = arg;
struct grpci2_regs __iomem *regs = priv->regs;
unsigned int status;
status = REGLOAD(regs->sts_cap);
if ((status & STS_ERR_IRQ) == 0)
return IRQ_NONE;
if (status & STS_IPARERR)
printk(KERN_ERR "GRPCI2: Parity Error\n");
if (status & STS_ITGTABRT)
printk(KERN_ERR "GRPCI2: Target Abort\n");
if (status & STS_IMSTABRT)
printk(KERN_ERR "GRPCI2: Master Abort\n");
if (status & STS_ISYSERR)
printk(KERN_ERR "GRPCI2: System Error\n");
/* Clear handled INT TYPE IRQs */
REGSTORE(regs->sts_cap, status & STS_ERR_IRQ);
return IRQ_HANDLED;
}
static int grpci2_of_probe(struct platform_device *ofdev)
{
struct grpci2_regs __iomem *regs;
struct grpci2_priv *priv;
int err, i, len;
const int *tmp;
unsigned int capability;
if (grpci2priv) {
printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n");
return -ENODEV;
}
if (ofdev->num_resources < 3) {
printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n");
return -EIO;
}
/* Find Device Address */
regs = of_ioremap(&ofdev->resource[0], 0,
resource_size(&ofdev->resource[0]),
"grlib-grpci2 regs");
if (regs == NULL) {
printk(KERN_ERR "GRPCI2: ioremap failed\n");
return -EIO;
}
/*
* Check that we're in Host Slot and that we can act as a Host Bridge
* and not only as target.
*/
capability = REGLOAD(regs->sts_cap);
if ((capability & STS_HOST) || !(capability & STS_MST)) {
printk(KERN_INFO "GRPCI2: not in host system slot\n");
err = -EIO;
goto err1;
}
priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL);
if (grpci2priv == NULL) {
err = -ENOMEM;
goto err1;
}
priv->regs = regs;
priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq);
/* Byte twisting should be made configurable from kernel command line */
priv->bt_enabled = 1;
/* Let user do custom Target BAR assignment */
tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len);
if (tmp && (len == 2*4*6))
memcpy(priv->tgtbars, tmp, 2*4*6);
else
memset(priv->tgtbars, -1, 2*4*6);
/* Limit IRQ unmasking in irq_mode 2 and 3 */
tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len);
if (tmp && (len == 4))
priv->irq_mask = *tmp & 0xf;
else
priv->irq_mask = 0xf;
/* Optional PCI reset. Force PCI reset on startup */
tmp = of_get_property(ofdev->dev.of_node, "reset", &len);
if (tmp && (len == 4))
priv->do_reset = *tmp;
else
priv->do_reset = 0;
/* Find PCI Memory, I/O and Configuration Space Windows */
priv->pci_area = ofdev->resource[1].start;
priv->pci_area_end = ofdev->resource[1].end+1;
priv->pci_io = ofdev->resource[2].start;
priv->pci_conf = ofdev->resource[2].start + 0x10000;
priv->pci_conf_end = priv->pci_conf + 0x10000;
priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
if (!priv->pci_io_va) {
err = -EIO;
goto err2;
}
printk(KERN_INFO
"GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
" I/O SPACE [0x%08lx - 0x%08lx]\n"
" CONFIG SPACE [0x%08lx - 0x%08lx]\n",
priv->pci_area, priv->pci_area_end-1,
priv->pci_io, priv->pci_conf-1,
priv->pci_conf, priv->pci_conf_end-1);
/*
* I/O Space resources in I/O Window mapped into Virtual Adr Space
* We never use the low 4KB because some devices seem to have problems using
* address 0.
*/
memset(&priv->info.io_space, 0, sizeof(struct resource));
priv->info.io_space.name = "GRPCI2 PCI I/O Space";
priv->info.io_space.start = priv->pci_io_va + 0x1000;
priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
priv->info.io_space.flags = IORESOURCE_IO;
/*
* GRPCI2 has no prefetchable memory, map everything as
* non-prefetchable memory
*/
memset(&priv->info.mem_space, 0, sizeof(struct resource));
priv->info.mem_space.name = "GRPCI2 PCI MEM Space";
priv->info.mem_space.start = priv->pci_area;
priv->info.mem_space.end = priv->pci_area_end - 1;
priv->info.mem_space.flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource, &priv->info.mem_space) < 0)
goto err3;
if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
goto err4;
/* setup maximum supported PCI buses */
priv->info.busn.name = "GRPCI2 busn";
priv->info.busn.start = 0;
priv->info.busn.end = 255;
grpci2_hw_init(priv);
/*
* Get PCI Interrupt to System IRQ mapping and setup IRQ handling
* Error IRQ always on PCI INTA.
*/
if (priv->irq_mode < 2) {
/* All PCI interrupts are shared using the same system IRQ */
leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq,
"pcilvl", 0);
priv->irq_map[0] = grpci2_build_device_irq(1);
priv->irq_map[1] = grpci2_build_device_irq(2);
priv->irq_map[2] = grpci2_build_device_irq(3);
priv->irq_map[3] = grpci2_build_device_irq(4);
priv->virq_err = grpci2_build_device_irq(5);
if (priv->irq_mode & 1)
priv->virq_dma = ofdev->archdata.irqs[1];
else
priv->virq_dma = grpci2_build_device_irq(6);
/* Enable IRQs on LEON IRQ controller */
err = request_irq(priv->irq, grpci2_jump_interrupt, 0,
"GRPCI2_JUMP", priv);
if (err)
printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n");
} else {
/* All PCI interrupts have a unique system IRQ */
for (i = 0; i < 4; i++) {
/* Make LEON IRQ layer handle level IRQ by acking */
leon_update_virq_handling(ofdev->archdata.irqs[i],
handle_fasteoi_irq, "pcilvl",
1);
priv->irq_map[i] = ofdev->archdata.irqs[i];
}
priv->virq_err = priv->irq_map[0];
if (priv->irq_mode & 1)
priv->virq_dma = ofdev->archdata.irqs[4];
else
priv->virq_dma = priv->irq_map[0];
/* Unmask all PCI interrupts, request_irq will not do that */
REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf));
}
/* Setup IRQ handler for non-configuration space access errors */
err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED,
"GRPCI2_ERR", priv);
if (err) {
printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err);
goto err5;
}
/*
* Enable Error Interrupts. PCI interrupts are unmasked once request_irq
* is called by the PCI Device drivers
*/
REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI);
/* Init common layer and scan buses */
priv->info.ops = &grpci2_ops;
priv->info.map_irq = grpci2_map_irq;
leon_pci_init(ofdev, &priv->info);
return 0;
err5:
release_resource(&priv->info.io_space);
err4:
release_resource(&priv->info.mem_space);
err3:
err = -ENOMEM;
iounmap((void __iomem *)priv->pci_io_va);
err2:
kfree(priv);
err1:
of_iounmap(&ofdev->resource[0], regs,
resource_size(&ofdev->resource[0]));
return err;
}
static const struct of_device_id grpci2_of_match[] __initconst = {
{
.name = "GAISLER_GRPCI2",
},
{
.name = "01_07c",
},
{},
};
static struct platform_driver grpci2_of_driver = {
.driver = {
.name = "grpci2",
.of_match_table = grpci2_of_match,
},
.probe = grpci2_of_probe,
};
static int __init grpci2_init(void)
{
return platform_driver_register(&grpci2_of_driver);
}
subsys_initcall(grpci2_init);
| linux-master | arch/sparc/kernel/leon_pci_grpci2.c |
// SPDX-License-Identifier: GPL-2.0
/* power.c: Power management driver.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/prom.h>
#include <asm/io.h>
static void __iomem *power_reg;
static irqreturn_t power_handler(int irq, void *dev_id)
{
orderly_poweroff(true);
/* FIXME: Check registers for status... */
return IRQ_HANDLED;
}
static int has_button_interrupt(unsigned int irq, struct device_node *dp)
{
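/* An IRQ value of 0xffffffff is treated as "no interrupt assigned". */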
if (irq == 0xffffffff)
return 0;
if (!of_property_read_bool(dp, "button"))
return 0;
return 1;
}
static int power_probe(struct platform_device *op)
{
struct resource *res = &op->resource[0];
unsigned int irq = op->archdata.irqs[0];
power_reg = of_ioremap(res, 0, 0x4, "power");
printk(KERN_INFO "%pOFn: Control reg at %llx\n",
op->dev.of_node, res->start);
if (has_button_interrupt(irq, op->dev.of_node)) {
if (request_irq(irq,
power_handler, 0, "power", NULL) < 0)
printk(KERN_ERR "power: Cannot setup IRQ handler.\n");
}
return 0;
}
static const struct of_device_id power_match[] = {
{
.name = "power",
},
{},
};
static struct platform_driver power_driver = {
.probe = power_probe,
.driver = {
.name = "power",
.of_match_table = power_match,
},
};
builtin_platform_driver(power_driver);
| linux-master | arch/sparc/kernel/power.c |
// SPDX-License-Identifier: GPL-2.0
/* central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
*
* Copyright (C) 1997, 1999, 2008 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/fhc.h>
#include <asm/upa.h>
struct clock_board {
void __iomem *clock_freq_regs;
void __iomem *clock_regs;
void __iomem *clock_ver_reg;
int num_slots;
struct resource leds_resource;
struct platform_device leds_pdev;
};
struct fhc {
void __iomem *pregs;
bool central;
bool jtag_master;
int board_num;
struct resource leds_resource;
struct platform_device leds_pdev;
};
static int clock_board_calc_nslots(struct clock_board *p)
{
u8 reg = upa_readb(p->clock_regs + CLOCK_STAT1) & 0xc0;
switch (reg) {
case 0x40:
return 16;
case 0xc0:
return 8;
case 0x80:
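/* Newer clock boards provide a version register which tells 4-slot
* and 5-slot systems apart; without it, assume 4 slots.
*/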
reg = 0;
if (p->clock_ver_reg)
reg = upa_readb(p->clock_ver_reg);
if (reg) {
if (reg & 0x80)
return 4;
else
return 5;
}
fallthrough;
default:
return 4;
}
}
static int clock_board_probe(struct platform_device *op)
{
struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
if (!p) {
printk(KERN_ERR "clock_board: Cannot allocate struct clock_board\n");
goto out;
}
p->clock_freq_regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]),
"clock_board_freq");
if (!p->clock_freq_regs) {
printk(KERN_ERR "clock_board: Cannot map clock_freq_regs\n");
goto out_free;
}
p->clock_regs = of_ioremap(&op->resource[1], 0,
resource_size(&op->resource[1]),
"clock_board_regs");
if (!p->clock_regs) {
printk(KERN_ERR "clock_board: Cannot map clock_regs\n");
goto out_unmap_clock_freq_regs;
}
if (op->resource[2].flags) {
p->clock_ver_reg = of_ioremap(&op->resource[2], 0,
resource_size(&op->resource[2]),
"clock_ver_reg");
if (!p->clock_ver_reg) {
printk(KERN_ERR "clock_board: Cannot map clock_ver_reg\n");
goto out_unmap_clock_regs;
}
}
p->num_slots = clock_board_calc_nslots(p);
p->leds_resource.start = (unsigned long)
(p->clock_regs + CLOCK_CTRL);
p->leds_resource.end = p->leds_resource.start;
p->leds_resource.name = "leds";
p->leds_pdev.name = "sunfire-clockboard-leds";
p->leds_pdev.id = -1;
p->leds_pdev.resource = &p->leds_resource;
p->leds_pdev.num_resources = 1;
p->leds_pdev.dev.parent = &op->dev;
err = platform_device_register(&p->leds_pdev);
if (err) {
printk(KERN_ERR "clock_board: Could not register LEDS "
"platform device\n");
goto out_unmap_clock_ver_reg;
}
printk(KERN_INFO "clock_board: Detected %d slot Enterprise system.\n",
p->num_slots);
err = 0;
out:
return err;
out_unmap_clock_ver_reg:
if (p->clock_ver_reg)
of_iounmap(&op->resource[2], p->clock_ver_reg,
resource_size(&op->resource[2]));
out_unmap_clock_regs:
of_iounmap(&op->resource[1], p->clock_regs,
resource_size(&op->resource[1]));
out_unmap_clock_freq_regs:
of_iounmap(&op->resource[0], p->clock_freq_regs,
resource_size(&op->resource[0]));
out_free:
kfree(p);
goto out;
}
static const struct of_device_id clock_board_match[] = {
{
.name = "clock-board",
},
{},
};
static struct platform_driver clock_board_driver = {
.probe = clock_board_probe,
.driver = {
.name = "clock_board",
.of_match_table = clock_board_match,
},
};
static int fhc_probe(struct platform_device *op)
{
struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
u32 reg;
if (!p) {
printk(KERN_ERR "fhc: Cannot allocate struct fhc\n");
goto out;
}
if (of_node_name_eq(op->dev.of_node->parent, "central"))
p->central = true;
p->pregs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]),
"fhc_pregs");
if (!p->pregs) {
printk(KERN_ERR "fhc: Cannot map pregs\n");
goto out_free;
}
if (p->central) {
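/* On the central FHC the board number is encoded in the Board
* Status Register: bit 16 supplies bit 0 and bits 15:13 supply
* bits 3:1.
*/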
reg = upa_readl(p->pregs + FHC_PREGS_BSR);
p->board_num = ((reg >> 16) & 1) | ((reg >> 12) & 0x0e);
} else {
p->board_num = of_getintprop_default(op->dev.of_node, "board#", -1);
if (p->board_num == -1) {
printk(KERN_ERR "fhc: No board# property\n");
goto out_unmap_pregs;
}
if (upa_readl(p->pregs + FHC_PREGS_JCTRL) & FHC_JTAG_CTRL_MENAB)
p->jtag_master = true;
}
if (!p->central) {
p->leds_resource.start = (unsigned long)
(p->pregs + FHC_PREGS_CTRL);
p->leds_resource.end = p->leds_resource.start;
p->leds_resource.name = "leds";
p->leds_pdev.name = "sunfire-fhc-leds";
p->leds_pdev.id = p->board_num;
p->leds_pdev.resource = &p->leds_resource;
p->leds_pdev.num_resources = 1;
p->leds_pdev.dev.parent = &op->dev;
err = platform_device_register(&p->leds_pdev);
if (err) {
printk(KERN_ERR "fhc: Could not register LEDS "
"platform device\n");
goto out_unmap_pregs;
}
}
reg = upa_readl(p->pregs + FHC_PREGS_CTRL);
if (!p->central)
reg |= FHC_CONTROL_IXIST;
reg &= ~(FHC_CONTROL_AOFF |
FHC_CONTROL_BOFF |
FHC_CONTROL_SLINE);
upa_writel(reg, p->pregs + FHC_PREGS_CTRL);
upa_readl(p->pregs + FHC_PREGS_CTRL);
reg = upa_readl(p->pregs + FHC_PREGS_ID);
printk(KERN_INFO "fhc: Board #%d, Version[%x] PartID[%x] Manuf[%x] %s\n",
p->board_num,
(reg & FHC_ID_VERS) >> 28,
(reg & FHC_ID_PARTID) >> 12,
(reg & FHC_ID_MANUF) >> 1,
(p->jtag_master ?
"(JTAG Master)" :
(p->central ? "(Central)" : "")));
err = 0;
out:
return err;
out_unmap_pregs:
of_iounmap(&op->resource[0], p->pregs, resource_size(&op->resource[0]));
out_free:
kfree(p);
goto out;
}
static const struct of_device_id fhc_match[] = {
{
.name = "fhc",
},
{},
};
static struct platform_driver fhc_driver = {
.probe = fhc_probe,
.driver = {
.name = "fhc",
.of_match_table = fhc_match,
},
};
static int __init sunfire_init(void)
{
(void) platform_driver_register(&fhc_driver);
(void) platform_driver_register(&clock_board_driver);
return 0;
}
fs_initcall(sunfire_init);
| linux-master | arch/sparc/kernel/central.c |
// SPDX-License-Identifier: GPL-2.0
/*
* starfire.c: Starfire/E10000 support.
*
* Copyright (C) 1998 David S. Miller ([email protected])
* Copyright (C) 2000 Anton Blanchard ([email protected])
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/upa.h>
#include <asm/starfire.h>
/*
* A few places around the kernel check this to see if
* they need to call us to do things in a Starfire specific
* way.
*/
int this_is_starfire = 0;
void check_if_starfire(void)
{
phandle ssnode = prom_finddevice("/ssp-serial");
if (ssnode != 0 && (s32)ssnode != -1)
this_is_starfire = 1;
}
/*
* Each Starfire board has 32 registers which perform translation
* and delivery of traditional interrupt packets into the extended
* Starfire hardware format. Essentially UPAID's now have 2 more
* bits than in all previous Sun5 systems.
*/
struct starfire_irqinfo {
unsigned long imap_slots[32];
unsigned long tregs[32];
struct starfire_irqinfo *next;
int upaid, hwmid;
};
static struct starfire_irqinfo *sflist = NULL;
/* Beam me up Scott(McNeil)y... */
void starfire_hookup(int upaid)
{
struct starfire_irqinfo *p;
unsigned long treg_base, hwmid, i;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
prom_printf("starfire_hookup: No memory, this is insane.\n");
prom_halt();
}
treg_base = 0x100fc000000UL;
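/* Remap the UPA ID into the Starfire hardware module ID: bits 5:2
* shift up one position, bit 6 drops down to bit 2, and bits 1:0
* pass through unchanged.
*/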
hwmid = ((upaid & 0x3c) << 1) |
((upaid & 0x40) >> 4) |
(upaid & 0x3);
p->hwmid = hwmid;
treg_base += (hwmid << 33UL);
treg_base += 0x200UL;
for (i = 0; i < 32; i++) {
p->imap_slots[i] = 0UL;
p->tregs[i] = treg_base + (i * 0x10UL);
/* Let's play it safe and not overwrite existing mappings */
if (upa_readl(p->tregs[i]) != 0)
p->imap_slots[i] = 0xdeadbeaf;
}
p->upaid = upaid;
p->next = sflist;
sflist = p;
}
unsigned int starfire_translate(unsigned long imap,
unsigned int upaid)
{
struct starfire_irqinfo *p;
unsigned int bus_hwmid;
unsigned int i;
bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
for (p = sflist; p != NULL; p = p->next)
if (p->hwmid == bus_hwmid)
break;
if (p == NULL) {
prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
((unsigned long)imap));
prom_halt();
}
for (i = 0; i < 32; i++) {
if (p->imap_slots[i] == imap ||
p->imap_slots[i] == 0UL)
break;
}
if (i == 32) {
printk("starfire_translate: Are you kidding me?\n");
panic("Lucy in the sky....");
}
p->imap_slots[i] = imap;
/* map to real upaid */
upaid = (((upaid & 0x3c) << 1) |
((upaid & 0x40) >> 4) |
(upaid & 0x3));
upa_writel(upaid, p->tregs[i]);
return i;
}
| linux-master | arch/sparc/kernel/starfire.c |
// SPDX-License-Identifier: GPL-2.0-only
/* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997,2008,2009,2012 David S. Miller ([email protected])
* Copyright (C) 1997,1999,2000 Jakub Jelinek ([email protected])
*/
/*
* I like traps on v9, :))))
*/
#include <linux/extable.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/gfp.h>
#include <linux/context_tracking.h>
#include <asm/smp.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/head.h>
#include <asm/prom.h>
#include <asm/memctrl.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include "entry.h"
#include "kernel.h"
#include "kstack.h"
/* When an irrecoverable trap occurs at tl > 0, the trap entry
* code logs the trap state registers at every level in the trap
* stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
* is as follows:
*/
struct tl1_traplog {
struct {
unsigned long tstate;
unsigned long tpc;
unsigned long tnpc;
unsigned long tt;
} trapstack[4];
unsigned long tl;
};
static void dump_tl1_traplog(struct tl1_traplog *p)
{
int i, limit;
printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
"dumping track stack.\n", p->tl);
limit = (tlb_type == hypervisor) ? 2 : 4;
for (i = 0; i < limit; i++) {
printk(KERN_EMERG
"TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
"TNPC[%016lx] TT[%lx]\n",
i + 1,
p->trapstack[i].tstate, p->trapstack[i].tpc,
p->trapstack[i].tnpc, p->trapstack[i].tt);
printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
}
}
void bad_trap(struct pt_regs *regs, long lvl)
{
char buffer[36];
if (notify_die(DIE_TRAP, "bad trap", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
return;
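/* Trap levels below 0x100 are hardware traps; software traps are
* biased by 0x100 by the trap table.
*/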
if (lvl < 0x100) {
sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
die_if_kernel(buffer, regs);
}
lvl -= 0x100;
if (regs->tstate & TSTATE_PRIV) {
sprintf(buffer, "Kernel bad sw trap %lx", lvl);
die_if_kernel(buffer, regs);
}
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault_trapno(SIGILL, ILL_ILLTRP,
(void __user *)regs->tpc, lvl);
}
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
char buffer[36];
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
sprintf (buffer, "Bad trap %lx at tl>0", lvl);
die_if_kernel (buffer, regs);
}
#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
bust_spinlocks(1);
printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif
static DEFINE_SPINLOCK(dimm_handler_lock);
static dimm_printer_t dimm_handler;
static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
{
unsigned long flags;
int ret = -ENODEV;
spin_lock_irqsave(&dimm_handler_lock, flags);
if (dimm_handler) {
ret = dimm_handler(synd_code, paddr, buf, buflen);
} else if (tlb_type == spitfire) {
if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
ret = -EINVAL;
else
ret = 0;
} else
ret = -ENODEV;
spin_unlock_irqrestore(&dimm_handler_lock, flags);
return ret;
}
int register_dimm_printer(dimm_printer_t func)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&dimm_handler_lock, flags);
if (!dimm_handler)
dimm_handler = func;
else
ret = -EEXIST;
spin_unlock_irqrestore(&dimm_handler_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(register_dimm_printer);
void unregister_dimm_printer(dimm_printer_t func)
{
unsigned long flags;
spin_lock_irqsave(&dimm_handler_lock, flags);
if (dimm_handler == func)
dimm_handler = NULL;
spin_unlock_irqrestore(&dimm_handler_lock, flags);
}
EXPORT_SYMBOL_GPL(unregister_dimm_printer);
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "instruction access exception", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
goto out;
if (regs->tstate & TSTATE_PRIV) {
printk("spitfire_insn_access_exception: SFSR[%016lx] "
"SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Iax", regs);
}
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)regs->tpc);
out:
exception_exit(prev_state);
}
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
spitfire_insn_access_exception(regs, sfsr, sfar);
}
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
unsigned short type = (type_ctx >> 16);
unsigned short ctx = (type_ctx & 0xffff);
if (notify_die(DIE_TRAP, "instruction access exception", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
if (regs->tstate & TSTATE_PRIV) {
printk("sun4v_insn_access_exception: ADDR[%016lx] "
"CTX[%04x] TYPE[%04x], going.\n",
addr, ctx, type);
die_if_kernel("Iax", regs);
}
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *) addr);
}
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
sun4v_insn_access_exception(regs, addr, type_ctx);
}
bool is_no_fault_exception(struct pt_regs *regs)
{
unsigned char asi;
u32 insn;
if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT)
return false;
/*
* Must do a little instruction decoding here in order to
* decide on a course of action. The bits of interest are:
* insn[31:30] = op, where 3 indicates the load/store group
* insn[24:19] = op3, which identifies individual opcodes
* insn[13] indicates an immediate offset
* op3[4]=1 identifies alternate space instructions
* op3[5:4]=3 identifies floating point instructions
* op3[2]=1 identifies stores
* See "Opcode Maps" in the appendix of any Sparc V9
* architecture spec for full details.
*/
if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */
if (insn & 0x2000) /* immediate offset */
asi = (regs->tstate >> 24); /* saved %asi */
else
asi = (insn >> 5); /* immediate asi */
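/* The 0xf6 mask matches all four no-fault ASIs: primary and
* secondary, big- and little-endian (0x82, 0x83, 0x8a, 0x8b).
*/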
if ((asi & 0xf6) == ASI_PNF) {
if (insn & 0x200000) /* op3[2], stores */
return false;
if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
handle_ldf_stq(insn, regs);
else
handle_ld_nf(insn, regs);
return true;
}
}
return false;
}
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "data access exception", regs,
0, 0x30, SIGTRAP) == NOTIFY_STOP)
goto out;
if (regs->tstate & TSTATE_PRIV) {
/* Test if this comes from uaccess places. */
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (entry) {
/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
regs->tpc, entry->fixup);
#endif
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
goto out;
}
/* Shit... */
printk("spitfire_data_access_exception: SFSR[%016lx] "
"SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Dax", regs);
}
if (is_no_fault_exception(regs))
goto out;
force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)sfar);
out:
exception_exit(prev_state);
}
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
0, 0x30, SIGTRAP) == NOTIFY_STOP)
return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
spitfire_data_access_exception(regs, sfsr, sfar);
}
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
unsigned short type = (type_ctx >> 16);
unsigned short ctx = (type_ctx & 0xffff);
if (notify_die(DIE_TRAP, "data access exception", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
if (regs->tstate & TSTATE_PRIV) {
/* Test if this comes from uaccess places. */
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (entry) {
/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
regs->tpc, entry->fixup);
#endif
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return;
}
printk("sun4v_data_access_exception: ADDR[%016lx] "
"CTX[%04x] TYPE[%04x], going.\n",
addr, ctx, type);
die_if_kernel("Dax", regs);
}
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
if (is_no_fault_exception(regs))
return;
/* MCD (Memory Corruption Detection) disabled trap (TT=0x19) in HV
* is vectored through the data access exception trap with fault type
* set to HV_FAULT_TYPE_MCD_DIS. Check for MCD disabled trap.
* Accessing an address with invalid ASI for the address, for
* example setting an ADI tag on an address with ASI_MCD_PRIMARY
* when TTE.mcd is not set for the VA, is also vectored into
* the kernel by HV as a data access exception with fault type set to
* HV_FAULT_TYPE_INV_ASI.
*/
switch (type) {
case HV_FAULT_TYPE_INV_ASI:
force_sig_fault(SIGILL, ILL_ILLADR, (void __user *)addr);
break;
case HV_FAULT_TYPE_MCD_DIS:
force_sig_fault(SIGSEGV, SEGV_ACCADI, (void __user *)addr);
break;
default:
force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)addr);
break;
}
}
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
sun4v_data_access_exception(regs, addr, type_ctx);
}
#ifdef CONFIG_PCI
#include "pci_impl.h"
#endif
/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
unsigned long va;
if (tlb_type != spitfire)
BUG();
/* Clean 'em. */
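/* Both L1 caches span two 8K pages worth of 32-byte lines; write
* a zero tag to every line of the I- and D-cache.
*/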
for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
spitfire_put_icache_tag(va, 0x0);
spitfire_put_dcache_tag(va, 0x0);
}
/* Re-enable in LSU. */
__asm__ __volatile__("flush %%g6\n\t"
"membar #Sync\n\t"
"stxa %0, [%%g0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
LSU_CONTROL_IM | LSU_CONTROL_DM),
"i" (ASI_LSU_CONTROL)
: "memory");
}
static void spitfire_enable_estate_errors(void)
{
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (ESTATE_ERR_ALL),
"i" (ASI_ESTATE_ERROR_EN));
}
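/* Map an 8-bit UDB ECC syndrome to the syndrome code expected by
* the DIMM decoder (sprintf_dimm / prom_getunumber).
*/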
static char ecc_syndrome_table[] = {
0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
static char *syndrome_unknown = "<Unknown>";
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
unsigned short scode;
char memmod_str[64], *p;
if (udbl & bit) {
scode = ecc_syndrome_table[udbl & 0xff];
if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
p = syndrome_unknown;
else
p = memmod_str;
printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
"Memory Module \"%s\"\n",
smp_processor_id(), scode, p);
}
if (udbh & bit) {
scode = ecc_syndrome_table[udbh & 0xff];
if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
p = syndrome_unknown;
else
p = memmod_str;
printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
"Memory Module \"%s\"\n",
smp_processor_id(), scode, p);
}
}
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
"AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
smp_processor_id(), afsr, afar, udbl, udbh, tl1);
spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
/* We always log it, even if someone is listening for this
* trap.
*/
notify_die(DIE_TRAP, "Correctable ECC Error", regs,
0, TRAP_TYPE_CEE, SIGTRAP);
/* The Correctable ECC Error trap does not disable I/D caches. So
* we only have to restore the ESTATE Error Enable register.
*/
spitfire_enable_estate_errors();
}
static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
"AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
/* XXX add more human friendly logging of the error status
* XXX as is implemented for cheetah
*/
spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
/* We always log it, even if someone is listening for this
* trap.
*/
notify_die(DIE_TRAP, "Uncorrectable Error", regs,
0, tt, SIGTRAP);
if (regs->tstate & TSTATE_PRIV) {
if (tl1)
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("UE", regs);
}
/* XXX need more intelligent processing here, such as is implemented
* XXX for cheetah errors, in fact if the E-cache still holds the
* XXX line with bad parity this will loop
*/
spitfire_clean_and_reenable_l1_caches();
spitfire_enable_estate_errors();
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault(SIGBUS, BUS_OBJERR, (void *)0);
}
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
unsigned long afsr, tt, udbh, udbl;
int tl1;
afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
#ifdef CONFIG_PCI
if (tt == TRAP_TYPE_DAE &&
pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
spitfire_clean_and_reenable_l1_caches();
spitfire_enable_estate_errors();
pci_poke_faulted = 1;
regs->tnpc = regs->tpc + 4;
return;
}
#endif
if (afsr & SFAFSR_UE)
spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
if (tt == TRAP_TYPE_CEE) {
/* Handle the case where we took a CEE trap, but ACK'd
* only the UE state in the UDB error registers.
*/
if (afsr & SFAFSR_UE) {
if (udbh & UDBE_CE) {
__asm__ __volatile__(
"stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (udbh & UDBE_CE),
"r" (0x0), "i" (ASI_UDB_ERROR_W));
}
if (udbl & UDBE_CE) {
__asm__ __volatile__(
"stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (udbl & UDBE_CE),
"r" (0x18), "i" (ASI_UDB_ERROR_W));
}
}
spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
}
}
int cheetah_pcache_forced_on;
void cheetah_enable_pcache(void)
{
unsigned long dcr;
printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
smp_processor_id());
__asm__ __volatile__("ldxa [%%g0] %1, %0"
: "=r" (dcr)
: "i" (ASI_DCU_CONTROL_REG));
dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;
/* This table is ordered in priority of errors and matches the
* AFAR overwrite policy as well.
*/
struct afsr_error_table {
unsigned long mask;
const char *name;
};
static const char CHAFSR_PERR_msg[] =
"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
{ CHAFSR_PERR, CHAFSR_PERR_msg },
{ CHAFSR_IERR, CHAFSR_IERR_msg },
{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
{ CHAFSR_UCU, CHAFSR_UCU_msg },
{ CHAFSR_UCC, CHAFSR_UCC_msg },
{ CHAFSR_UE, CHAFSR_UE_msg },
{ CHAFSR_EDU, CHAFSR_EDU_msg },
{ CHAFSR_EMU, CHAFSR_EMU_msg },
{ CHAFSR_WDU, CHAFSR_WDU_msg },
{ CHAFSR_CPU, CHAFSR_CPU_msg },
{ CHAFSR_CE, CHAFSR_CE_msg },
{ CHAFSR_EDC, CHAFSR_EDC_msg },
{ CHAFSR_EMC, CHAFSR_EMC_msg },
{ CHAFSR_WDC, CHAFSR_WDC_msg },
{ CHAFSR_CPC, CHAFSR_CPC_msg },
{ CHAFSR_TO, CHAFSR_TO_msg },
{ CHAFSR_BERR, CHAFSR_BERR_msg },
/* These two do not update the AFAR. */
{ CHAFSR_IVC, CHAFSR_IVC_msg },
{ CHAFSR_IVU, CHAFSR_IVU_msg },
{ 0, NULL },
};
static const char CHPAFSR_DTO_msg[] =
"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
{ CHAFSR_PERR, CHAFSR_PERR_msg },
{ CHAFSR_IERR, CHAFSR_IERR_msg },
{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
{ CHAFSR_UCU, CHAFSR_UCU_msg },
{ CHAFSR_UCC, CHAFSR_UCC_msg },
{ CHAFSR_UE, CHAFSR_UE_msg },
{ CHAFSR_EDU, CHAFSR_EDU_msg },
{ CHAFSR_EMU, CHAFSR_EMU_msg },
{ CHAFSR_WDU, CHAFSR_WDU_msg },
{ CHAFSR_CPU, CHAFSR_CPU_msg },
{ CHAFSR_CE, CHAFSR_CE_msg },
{ CHAFSR_EDC, CHAFSR_EDC_msg },
{ CHAFSR_EMC, CHAFSR_EMC_msg },
{ CHAFSR_WDC, CHAFSR_WDC_msg },
{ CHAFSR_CPC, CHAFSR_CPC_msg },
{ CHAFSR_TO, CHAFSR_TO_msg },
{ CHAFSR_BERR, CHAFSR_BERR_msg },
{ CHPAFSR_DTO, CHPAFSR_DTO_msg },
{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
{ CHPAFSR_THCE, CHPAFSR_THCE_msg },
{ CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
{ CHPAFSR_TUE, CHPAFSR_TUE_msg },
{ CHPAFSR_DUE, CHPAFSR_DUE_msg },
/* These two do not update the AFAR. */
{ CHAFSR_IVC, CHAFSR_IVC_msg },
{ CHAFSR_IVU, CHAFSR_IVU_msg },
{ 0, NULL },
};
static const char JPAFSR_JETO_msg[] =
"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
{ JPAFSR_JETO, JPAFSR_JETO_msg },
{ JPAFSR_SCE, JPAFSR_SCE_msg },
{ JPAFSR_JEIC, JPAFSR_JEIC_msg },
{ JPAFSR_JEIT, JPAFSR_JEIT_msg },
{ CHAFSR_PERR, CHAFSR_PERR_msg },
{ CHAFSR_IERR, CHAFSR_IERR_msg },
{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
{ CHAFSR_UCU, CHAFSR_UCU_msg },
{ CHAFSR_UCC, CHAFSR_UCC_msg },
{ CHAFSR_UE, CHAFSR_UE_msg },
{ CHAFSR_EDU, CHAFSR_EDU_msg },
{ JPAFSR_OM, JPAFSR_OM_msg },
{ CHAFSR_WDU, CHAFSR_WDU_msg },
{ CHAFSR_CPU, CHAFSR_CPU_msg },
{ CHAFSR_CE, CHAFSR_CE_msg },
{ CHAFSR_EDC, CHAFSR_EDC_msg },
{ JPAFSR_ETP, JPAFSR_ETP_msg },
{ CHAFSR_WDC, CHAFSR_WDC_msg },
{ CHAFSR_CPC, CHAFSR_CPC_msg },
{ CHAFSR_TO, CHAFSR_TO_msg },
{ CHAFSR_BERR, CHAFSR_BERR_msg },
{ JPAFSR_UMS, JPAFSR_UMS_msg },
{ JPAFSR_RUE, JPAFSR_RUE_msg },
{ JPAFSR_RCE, JPAFSR_RCE_msg },
{ JPAFSR_BP, JPAFSR_BP_msg },
{ JPAFSR_WBP, JPAFSR_WBP_msg },
{ JPAFSR_FRC, JPAFSR_FRC_msg },
{ JPAFSR_FRU, JPAFSR_FRU_msg },
/* These two do not update the AFAR. */
{ CHAFSR_IVU, CHAFSR_IVU_msg },
{ 0, NULL },
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;
struct cheetah_err_info *cheetah_error_log;
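/* Layout of the scoreboard allocated in cheetah_ecache_flush_init():
* two slots per cpu, so cpu N logs TL0 traps in entry (2 * N) and
* TL>0 traps (CHAFSR_TL1 set) in entry (2 * N + 1).
*/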
static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
struct cheetah_err_info *p;
int cpu = smp_processor_id();
if (!cheetah_error_log)
return NULL;
p = cheetah_error_log + (cpu * 2);
if ((afsr & CHAFSR_TL1) != 0UL)
p++;
return p;
}
extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
void __init cheetah_ecache_flush_init(void)
{
unsigned long largest_size, smallest_linesize, order, ver;
int i, sz;
/* Scan all cpu device tree nodes, note two values:
* 1) largest E-cache size
* 2) smallest E-cache line size
*/
largest_size = 0UL;
smallest_linesize = ~0UL;
for (i = 0; i < NR_CPUS; i++) {
unsigned long val;
val = cpu_data(i).ecache_size;
if (!val)
continue;
if (val > largest_size)
largest_size = val;
val = cpu_data(i).ecache_line_size;
if (val < smallest_linesize)
smallest_linesize = val;
}
if (largest_size == 0UL || smallest_linesize == ~0UL) {
prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
"parameters.\n");
prom_halt();
}
ecache_flush_size = (2 * largest_size);
ecache_flush_linesize = smallest_linesize;
ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
if (ecache_flush_physbase == ~0UL) {
prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
"contiguous physical memory.\n",
ecache_flush_size);
prom_halt();
}
/* Now allocate error trap reporting scoreboard. */
sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
for (order = 0; order <= MAX_ORDER; order++) {
if ((PAGE_SIZE << order) >= sz)
break;
}
cheetah_error_log = (struct cheetah_err_info *)
__get_free_pages(GFP_KERNEL, order);
if (!cheetah_error_log) {
prom_printf("cheetah_ecache_flush_init: Failed to allocate "
"error logging scoreboard (%d bytes).\n", sz);
prom_halt();
}
memset(cheetah_error_log, 0, PAGE_SIZE << order);
/* Mark all AFSRs as invalid so that the trap handler will
* log new information there.
*/
for (i = 0; i < 2 * NR_CPUS; i++)
cheetah_error_log[i].afsr = CHAFSR_INVALID;
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32) == __JALAPENO_ID ||
(ver >> 32) == __SERRANO_ID) {
cheetah_error_table = &__jalapeno_error_table[0];
cheetah_afsr_errors = JPAFSR_ERRORS;
} else if ((ver >> 32) == 0x003e0015) {
cheetah_error_table = &__cheetah_plus_error_table[0];
cheetah_afsr_errors = CHPAFSR_ERRORS;
} else {
cheetah_error_table = &__cheetah_error_table[0];
cheetah_afsr_errors = CHAFSR_ERRORS;
}
/* Now patch trap tables. */
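/* Each vector copied below is eight 4-byte instructions long, hence
* the (8 * 4) byte memcpy()s.
*/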
memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
if (tlb_type == cheetah_plus) {
memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
}
flushi(PAGE_OFFSET);
}
static void cheetah_flush_ecache(void)
{
unsigned long flush_base = ecache_flush_physbase;
unsigned long flush_linesize = ecache_flush_linesize;
unsigned long flush_size = ecache_flush_size;
__asm__ __volatile__("1: subcc %0, %4, %0\n\t"
" bne,pt %%xcc, 1b\n\t"
" ldxa [%2 + %0] %3, %%g0\n\t"
: "=&r" (flush_size)
: "0" (flush_size), "r" (flush_base),
"i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
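/* Displacement-flush one E-cache line.  ecache_flush_size is twice the
* largest E-cache, so loading from the line's index in the first half
* of the flush region and from its alias one half-size higher evicts
* any line with that index.  Purely for illustration: with an 8MB
* E-cache the flush span is 16MB, the index is physaddr & (8MB - 1),
* and the alias sits 8MB above it.
*/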
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
unsigned long alias;
physaddr &= ~(8UL - 1UL);
physaddr = (ecache_flush_physbase +
(physaddr & ((ecache_flush_size>>1UL) - 1UL)));
alias = physaddr + (ecache_flush_size >> 1UL);
__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
"ldxa [%1] %2, %%g0\n\t"
"membar #Sync"
: /* no outputs */
: "r" (physaddr), "r" (alias),
"i" (ASI_PHYS_USE_EC));
}
/* Unfortunately, the diagnostic access to the I-cache tags we need to
* use to clear the thing interferes with I-cache coherency transactions.
*
* So we must only flush the I-cache when it is disabled.
*/
static void __cheetah_flush_icache(void)
{
unsigned int icache_size, icache_line_size;
unsigned long addr;
icache_size = local_cpu_data().icache_size;
icache_line_size = local_cpu_data().icache_line_size;
/* Clear the valid bits in all the tags. */
for (addr = 0; addr < icache_size; addr += icache_line_size) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (addr | (2 << 3)),
"i" (ASI_IC_TAG));
}
}
static void cheetah_flush_icache(void)
{
unsigned long dcu_save;
/* Save current DCU, disable I-cache. */
__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
"or %0, %2, %%g1\n\t"
"stxa %%g1, [%%g0] %1\n\t"
"membar #Sync"
: "=r" (dcu_save)
: "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
: "g1");
__cheetah_flush_icache();
/* Restore DCU register */
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
static void cheetah_flush_dcache(void)
{
unsigned int dcache_size, dcache_line_size;
unsigned long addr;
dcache_size = local_cpu_data().dcache_size;
dcache_line_size = local_cpu_data().dcache_line_size;
for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (addr), "i" (ASI_DCACHE_TAG));
}
}
/* In order to make the even parity correct we must do two things.
* First, we clear DC_data_parity and set DC_utag to an appropriate value.
* Next, we clear out all 32-bytes of data for that line. Data of
* all-zero + tag parity value of zero == correct parity.
*/
static void cheetah_plus_zap_dcache_parity(void)
{
unsigned int dcache_size, dcache_line_size;
unsigned long addr;
dcache_size = local_cpu_data().dcache_size;
dcache_line_size = local_cpu_data().dcache_line_size;
for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
unsigned long tag = (addr >> 14);
unsigned long line;
__asm__ __volatile__("membar #Sync\n\t"
"stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (tag), "r" (addr),
"i" (ASI_DCACHE_UTAG));
for (line = addr; line < addr + dcache_line_size; line += 8)
__asm__ __volatile__("membar #Sync\n\t"
"stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (line),
"i" (ASI_DCACHE_DATA));
}
}
/* Conversion tables used to frob Cheetah AFSR syndrome values into
* something palatable to the memory controller driver get_unumber
* routine.
*/
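/* A reading guide for the tables below, offered as an aid rather than
* a spec: values 0-127 name the failing data bit, C0-C8 the ECC check
* bits, MT0-MT2 and MTC0-MTC3 the MTAG data and check bits, M/M2/M3/M4
* multi-bit syndromes, and NONE means no error is indicated.
*/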
#define MT0 137
#define MT1 138
#define MT2 139
#define NONE 254
#define MTC0 140
#define MTC1 141
#define MTC2 142
#define MTC3 143
#define C0 128
#define C1 129
#define C2 130
#define C3 131
#define C4 132
#define C5 133
#define C6 134
#define C7 135
#define C8 136
#define M2 144
#define M3 145
#define M4 146
#define M 147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
NONE, MTC0,
MTC1, NONE,
MTC2, NONE,
NONE, MT0,
MTC3, NONE,
NONE, MT1,
NONE, MT2,
NONE, NONE
};
/* Return the highest priority error condition mentioned. */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
unsigned long tmp = 0;
int i;
for (i = 0; cheetah_error_table[i].mask; i++) {
if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
return tmp;
}
return tmp;
}
static const char *cheetah_get_string(unsigned long bit)
{
int i;
for (i = 0; cheetah_error_table[i].mask; i++) {
if ((bit & cheetah_error_table[i].mask) != 0UL)
return cheetah_error_table[i].name;
}
return "???";
}
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
unsigned long afsr, unsigned long afar, int recoverable)
{
unsigned long hipri;
char unum[256];
printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
afsr, afar,
(afsr & CHAFSR_TL1) ? 1 : 0);
printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
printk("%s" "ERROR(%d): ",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
printk("TPC<%pS>\n", (void *) regs->tpc);
printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
(afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
(afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
(afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
(afsr & CHAFSR_PRIV) ? ", Privileged" : "");
hipri = cheetah_get_hipri(afsr);
printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
hipri, cheetah_get_string(hipri));
/* Try to get unumber if relevant. */
#define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
CHAFSR_CPC | CHAFSR_CPU | \
CHAFSR_UE | CHAFSR_CE | \
CHAFSR_EDC | CHAFSR_EDU | \
CHAFSR_UCC | CHAFSR_UCU | \
CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
if (afsr & ESYND_ERRORS) {
int syndrome;
int ret;
syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
syndrome = cheetah_ecc_syntab[syndrome];
ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
if (ret != -1)
printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
(recoverable ? KERN_WARNING : KERN_CRIT),
smp_processor_id(), unum);
} else if (afsr & MSYND_ERRORS) {
int syndrome;
int ret;
syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
syndrome = cheetah_mtag_syntab[syndrome];
ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
if (ret != -1)
printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
(recoverable ? KERN_WARNING : KERN_CRIT),
smp_processor_id(), unum);
}
/* Now dump the cache snapshots. */
printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
(int) info->dcache_index,
info->dcache_tag,
info->dcache_utag,
info->dcache_stag);
printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
info->dcache_data[0],
info->dcache_data[1],
info->dcache_data[2],
info->dcache_data[3]);
printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
"u[%016llx] l[%016llx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
(int) info->icache_index,
info->icache_tag,
info->icache_utag,
info->icache_stag,
info->icache_upper,
info->icache_lower);
printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
info->icache_data[0],
info->icache_data[1],
info->icache_data[2],
info->icache_data[3]);
printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
info->icache_data[4],
info->icache_data[5],
info->icache_data[6],
info->icache_data[7]);
printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
(int) info->ecache_index, info->ecache_tag);
printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
info->ecache_data[0],
info->ecache_data[1],
info->ecache_data[2],
info->ecache_data[3]);
afsr = (afsr & ~hipri) & cheetah_afsr_errors;
while (afsr != 0UL) {
unsigned long bit = cheetah_get_hipri(afsr);
printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
(recoverable ? KERN_WARNING : KERN_CRIT),
bit, cheetah_get_string(bit));
afsr &= ~bit;
}
if (!recoverable)
printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
unsigned long afsr, afar;
int ret = 0;
__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
: "=r" (afsr)
: "i" (ASI_AFSR));
if ((afsr & cheetah_afsr_errors) != 0) {
if (logp != NULL) {
__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
: "=r" (afar)
: "i" (ASI_AFAR));
logp->afsr = afsr;
logp->afar = afar;
}
ret = 1;
}
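/* AFSR error bits are write-one-to-clear (per the UltraSPARC III
* manuals), so the store below clears exactly the error bits sampled
* above and leaves anything that lands later for the next recheck.
*/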
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync\n\t"
: : "r" (afsr), "i" (ASI_AFSR));
return ret;
}
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
struct cheetah_err_info local_snapshot, *p;
int recoverable;
/* Flush E-cache */
cheetah_flush_ecache();
p = cheetah_get_error_log(afsr);
if (!p) {
prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
afsr, afar);
prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
prom_halt();
}
/* Grab snapshot of logged error. */
memcpy(&local_snapshot, p, sizeof(local_snapshot));
/* If the current trap snapshot does not match what the
* trap handler passed along into our args, big trouble.
* In such a case, mark the local copy as invalid.
*
* Else, it matches and we mark the afsr in the non-local
* copy as invalid so we may log new error traps there.
*/
if (p->afsr != afsr || p->afar != afar)
local_snapshot.afsr = CHAFSR_INVALID;
else
p->afsr = CHAFSR_INVALID;
cheetah_flush_icache();
cheetah_flush_dcache();
/* Re-enable I-cache/D-cache */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_DCU_CONTROL_REG),
"i" (DCU_DC | DCU_IC)
: "g1");
/* Re-enable error reporting */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_ESTATE_ERROR_EN),
"i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
: "g1");
/* Decide if we can continue after handling this trap and
* logging the error.
*/
recoverable = 1;
if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
recoverable = 0;
/* Re-check AFSR/AFAR. What we are looking for here is whether a new
* error was logged while we had error reporting traps disabled.
*/
if (cheetah_recheck_errors(&local_snapshot)) {
unsigned long new_afsr = local_snapshot.afsr;
/* If we got a new asynchronous error, die... */
if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
CHAFSR_WDU | CHAFSR_CPU |
CHAFSR_IVU | CHAFSR_UE |
CHAFSR_BERR | CHAFSR_TO))
recoverable = 0;
}
/* Log errors. */
cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
if (!recoverable)
panic("Irrecoverable Fast-ECC error trap.\n");
/* Flush E-cache to kick the error trap handlers out. */
cheetah_flush_ecache();
}
/* Try to fix a correctable error by pushing the line out from
* the E-cache. Recheck error reporting registers to see if the
* problem is intermittent.
*/
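/* Return values, as used below: 0 - no new error was logged, so the
* problem was intermittent; 1 - the retry load came back clean;
* 2 - the line still reports an error after the retry.
*/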
static int cheetah_fix_ce(unsigned long physaddr)
{
unsigned long orig_estate;
unsigned long alias1, alias2;
int ret;
/* Make sure correctable error traps are disabled. */
__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
"andn %0, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %2\n\t"
"membar #Sync"
: "=&r" (orig_estate)
: "i" (ESTATE_ERROR_CEEN),
"i" (ASI_ESTATE_ERROR_EN)
: "g1");
/* We calculate alias addresses that will force the
* cache line in question out of the E-cache. Then
* we bring it back in with an atomic instruction so
* that we get it in some modified/exclusive state,
* then we displace it again to try and get proper ECC
* pushed back into the system.
*/
physaddr &= ~(8UL - 1UL);
alias1 = (ecache_flush_physbase +
(physaddr & ((ecache_flush_size >> 1) - 1)));
alias2 = alias1 + (ecache_flush_size >> 1);
__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
"ldxa [%1] %3, %%g0\n\t"
"casxa [%2] %3, %%g0, %%g0\n\t"
"ldxa [%0] %3, %%g0\n\t"
"ldxa [%1] %3, %%g0\n\t"
"membar #Sync"
: /* no outputs */
: "r" (alias1), "r" (alias2),
"r" (physaddr), "i" (ASI_PHYS_USE_EC));
/* Did that trigger another error? */
if (cheetah_recheck_errors(NULL)) {
/* Try one more time. */
__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
"membar #Sync"
: : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
if (cheetah_recheck_errors(NULL))
ret = 2;
else
ret = 1;
} else {
/* No new error, intermittent problem. */
ret = 0;
}
/* Restore error enables. */
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync"
: : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
return ret;
}
/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
unsigned long vaddr = PAGE_OFFSET + paddr;
if (vaddr > (unsigned long) high_memory)
return 0;
return kern_addr_valid(vaddr);
}
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
struct cheetah_err_info local_snapshot, *p;
int recoverable, is_memory;
p = cheetah_get_error_log(afsr);
if (!p) {
prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
afsr, afar);
prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
prom_halt();
}
/* Grab snapshot of logged error. */
memcpy(&local_snapshot, p, sizeof(local_snapshot));
/* If the current trap snapshot does not match what the
* trap handler passed along into our args, big trouble.
* In such a case, mark the local copy as invalid.
*
* Else, it matches and we mark the afsr in the non-local
* copy as invalid so we may log new error traps there.
*/
if (p->afsr != afsr || p->afar != afar)
local_snapshot.afsr = CHAFSR_INVALID;
else
p->afsr = CHAFSR_INVALID;
is_memory = cheetah_check_main_memory(afar);
if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
/* XXX Might want to log the results of this operation
* XXX somewhere... -DaveM
*/
cheetah_fix_ce(afar);
}
{
int flush_all, flush_line;
flush_all = flush_line = 0;
if ((afsr & CHAFSR_EDC) != 0UL) {
if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
flush_line = 1;
else
flush_all = 1;
} else if ((afsr & CHAFSR_CPC) != 0UL) {
if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
flush_line = 1;
else
flush_all = 1;
}
/* Trap handler only disabled I-cache, flush it. */
cheetah_flush_icache();
/* Re-enable I-cache */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_DCU_CONTROL_REG),
"i" (DCU_IC)
: "g1");
if (flush_all)
cheetah_flush_ecache();
else if (flush_line)
cheetah_flush_ecache_line(afar);
}
/* Re-enable error reporting */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_ESTATE_ERROR_EN),
"i" (ESTATE_ERROR_CEEN)
: "g1");
/* Decide if we can continue after handling this trap and
* logging the error.
*/
recoverable = 1;
if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
recoverable = 0;
/* Re-check AFSR/AFAR */
(void) cheetah_recheck_errors(&local_snapshot);
/* Log errors. */
cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
if (!recoverable)
panic("Irrecoverable Correctable-ECC error trap.\n");
}
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
struct cheetah_err_info local_snapshot, *p;
int recoverable, is_memory;
#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
cheetah_flush_icache();
cheetah_flush_dcache();
/* Re-enable I-cache/D-cache */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_DCU_CONTROL_REG),
"i" (DCU_DC | DCU_IC)
: "g1");
/* Re-enable error reporting */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_ESTATE_ERROR_EN),
"i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
: "g1");
(void) cheetah_recheck_errors(NULL);
pci_poke_faulted = 1;
regs->tpc += 4;
regs->tnpc = regs->tpc + 4;
return;
}
#endif
p = cheetah_get_error_log(afsr);
if (!p) {
prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
afsr, afar);
prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
prom_halt();
}
/* Grab snapshot of logged error. */
memcpy(&local_snapshot, p, sizeof(local_snapshot));
/* If the current trap snapshot does not match what the
* trap handler passed along into our args, big trouble.
* In such a case, mark the local copy as invalid.
*
* Else, it matches and we mark the afsr in the non-local
* copy as invalid so we may log new error traps there.
*/
if (p->afsr != afsr || p->afar != afar)
local_snapshot.afsr = CHAFSR_INVALID;
else
p->afsr = CHAFSR_INVALID;
is_memory = cheetah_check_main_memory(afar);
{
int flush_all, flush_line;
flush_all = flush_line = 0;
if ((afsr & CHAFSR_EDU) != 0UL) {
if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
flush_line = 1;
else
flush_all = 1;
} else if ((afsr & CHAFSR_BERR) != 0UL) {
if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
flush_line = 1;
else
flush_all = 1;
}
cheetah_flush_icache();
cheetah_flush_dcache();
/* Re-enable I/D caches */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_DCU_CONTROL_REG),
"i" (DCU_IC | DCU_DC)
: "g1");
if (flush_all)
cheetah_flush_ecache();
else if (flush_line)
cheetah_flush_ecache_line(afar);
}
/* Re-enable error reporting */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_ESTATE_ERROR_EN),
"i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
: "g1");
/* Decide if we can continue after handling this trap and
* logging the error.
*/
recoverable = 1;
if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
recoverable = 0;
/* Re-check AFSR/AFAR. What we are looking for here is whether a new
* error was logged while we had error reporting traps disabled.
*/
if (cheetah_recheck_errors(&local_snapshot)) {
unsigned long new_afsr = local_snapshot.afsr;
/* If we got a new asynchronous error, die... */
if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
CHAFSR_WDU | CHAFSR_CPU |
CHAFSR_IVU | CHAFSR_UE |
CHAFSR_BERR | CHAFSR_TO))
recoverable = 0;
}
/* Log errors. */
cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
/* "Recoverable" here means we try to yank the page from ever
* being newly used again. This depends upon a few things:
* 1) Must be main memory, and AFAR must be valid.
* 2) If we trapped from user, OK.
* 3) Else, if we trapped from kernel we must find exception
* table entry (ie. we have to have been accessing user
* space).
*
* If AFAR is not in main memory, or we trapped from kernel
* and cannot find an exception table entry, it is unacceptable
* to try and continue.
*/
if (recoverable && is_memory) {
if ((regs->tstate & TSTATE_PRIV) == 0UL) {
/* OK, usermode access. */
recoverable = 1;
} else {
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (entry) {
/* OK, kernel access to userspace. */
recoverable = 1;
} else {
/* BAD, privileged state is corrupted. */
recoverable = 0;
}
if (recoverable) {
if (pfn_valid(afar >> PAGE_SHIFT))
get_page(pfn_to_page(afar >> PAGE_SHIFT));
else
recoverable = 0;
/* Only perform fixup if we still have a
* recoverable condition.
*/
if (recoverable) {
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
}
}
}
} else {
recoverable = 0;
}
if (!recoverable)
panic("Irrecoverable deferred error trap.\n");
}
/* Handle a D/I cache parity error trap. TYPE is encoded as:
*
* Bit0: 0=dcache,1=icache
* Bit1: 0=recoverable,1=unrecoverable
*
* The hardware has disabled both the I-cache and D-cache in
* the %dcr register.
*/
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
if (type & 0x1)
__cheetah_flush_icache();
else
cheetah_plus_zap_dcache_parity();
cheetah_flush_dcache();
/* Re-enable I-cache/D-cache */
__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
"or %%g1, %1, %%g1\n\t"
"stxa %%g1, [%%g0] %0\n\t"
"membar #Sync"
: /* no outputs */
: "i" (ASI_DCU_CONTROL_REG),
"i" (DCU_DC | DCU_IC)
: "g1");
if (type & 0x2) {
printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
smp_processor_id(),
(type & 0x1) ? 'I' : 'D',
regs->tpc);
printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
panic("Irrecoverable Cheetah+ parity error.");
}
printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
smp_processor_id(),
(type & 0x1) ? 'I' : 'D',
regs->tpc);
printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
struct sun4v_error_entry {
/* Unique error handle */
/*0x00*/u64 err_handle;
/* %stick value at the time of the error */
/*0x08*/u64 err_stick;
/*0x10*/u8 reserved_1[3];
/* Error type */
/*0x13*/u8 err_type;
#define SUN4V_ERR_TYPE_UNDEFINED 0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
#define SUN4V_ERR_TYPE_PRECISE_NONRES 2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
#define SUN4V_ERR_TYPE_SHUTDOWN_RQST 4
#define SUN4V_ERR_TYPE_DUMP_CORE 5
#define SUN4V_ERR_TYPE_SP_STATE_CHANGE 6
#define SUN4V_ERR_TYPE_NUM 7
/* Error attributes */
/*0x14*/u32 err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
#define SUN4V_ERR_ATTRS_MEMORY 0x00000002
#define SUN4V_ERR_ATTRS_PIO 0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST 0x00000020
#define SUN4V_ERR_ATTRS_ASR 0x00000040
#define SUN4V_ERR_ATTRS_ASI 0x00000080
#define SUN4V_ERR_ATTRS_PRIV_REG 0x00000100
#define SUN4V_ERR_ATTRS_SPSTATE_MSK 0x00000600
#define SUN4V_ERR_ATTRS_MCD 0x00000800
#define SUN4V_ERR_ATTRS_SPSTATE_SHFT 9
#define SUN4V_ERR_ATTRS_MODE_MSK 0x03000000
#define SUN4V_ERR_ATTRS_MODE_SHFT 24
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
#define SUN4V_ERR_SPSTATE_FAULTED 0
#define SUN4V_ERR_SPSTATE_AVAILABLE 1
#define SUN4V_ERR_SPSTATE_NOT_PRESENT 2
#define SUN4V_ERR_MODE_USER 1
#define SUN4V_ERR_MODE_PRIV 2
/* Real address of the memory region or PIO transaction */
/*0x18*/u64 err_raddr;
/* Size of the operation triggering the error, in bytes */
/*0x20*/u32 err_size;
/* ID of the CPU */
/*0x24*/u16 err_cpu;
/* Grace period for shutdown, in seconds */
/*0x26*/u16 err_secs;
/* Value of the %asi register */
/*0x28*/u8 err_asi;
/*0x29*/u8 reserved_2;
/* ASR register number */
/*0x2a*/u16 err_asr;
#define SUN4V_ERR_ASR_VALID 0x8000
/*0x2c*/u32 reserved_3;
/*0x30*/u64 reserved_4;
/*0x38*/u64 reserved_5;
};
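/* A consistency note: the annotated offsets above add up to a 0x40
* byte entry, which is why sun4v_log_error() can dump a report as the
* eight raw u64 words raw_ptr[0..7].
*/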
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
static const char *sun4v_err_type_to_str(u8 type)
{
static const char *types[SUN4V_ERR_TYPE_NUM] = {
"undefined",
"uncorrected resumable",
"precise nonresumable",
"deferred nonresumable",
"shutdown request",
"dump core",
"SP state change",
};
if (type < SUN4V_ERR_TYPE_NUM)
return types[type];
return "unknown";
}
static void sun4v_emit_err_attr_strings(u32 attrs)
{
static const char *attr_names[] = {
"processor",
"memory",
"PIO",
"int-registers",
"fpu-registers",
"shutdown-request",
"ASR",
"ASI",
"priv-reg",
};
static const char *sp_states[] = {
"sp-faulted",
"sp-available",
"sp-not-present",
"sp-state-reserved",
};
static const char *modes[] = {
"mode-reserved0",
"user",
"priv",
"mode-reserved1",
};
u32 sp_state, mode;
int i;
for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
if (attrs & (1U << i)) {
const char *s = attr_names[i];
pr_cont("%s ", s);
}
}
sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
SUN4V_ERR_ATTRS_SPSTATE_SHFT);
pr_cont("%s ", sp_states[sp_state]);
mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
SUN4V_ERR_ATTRS_MODE_SHFT);
pr_cont("%s ", modes[mode]);
if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
pr_cont("res-queue-full ");
}
/* When the report contains a real-address of "-1" it means that the
* hardware did not provide the address. So we compute the effective
* address of the load or store instruction at regs->tpc and report
* that. Usually when this happens it's a PIO and in such a case we
* are using physical addresses with bypass ASIs anyway, so what we
* report here is exactly what we want.
*/
static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
{
unsigned int insn;
u64 addr;
if (!(regs->tstate & TSTATE_PRIV))
return;
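/* tstate is privileged here, so dereferencing the trap PC directly is
* safe; user tpc values bailed out above.
*/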
insn = *(unsigned int *) regs->tpc;
addr = compute_effective_address(regs, insn, 0);
printk("%s: insn effective address [0x%016llx]\n",
pfx, addr);
}
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
int cpu, const char *pfx, atomic_t *ocnt)
{
u64 *raw_ptr = (u64 *) ent;
u32 attrs;
int cnt;
printk("%s: Reporting on cpu %d\n", pfx, cpu);
printk("%s: TPC [0x%016lx] <%pS>\n",
pfx, regs->tpc, (void *) regs->tpc);
printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
printk("%s: %016llx:%016llx:%016llx:%016llx]\n",
pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);
printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
pfx, ent->err_handle, ent->err_stick);
printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));
attrs = ent->err_attrs;
printk("%s: attrs [0x%08x] < ", pfx, attrs);
sun4v_emit_err_attr_strings(attrs);
pr_cont(">\n");
/* Various fields in the error report are only valid if
* certain attribute bits are set.
*/
if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
SUN4V_ERR_ATTRS_PIO |
SUN4V_ERR_ATTRS_ASI)) {
printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);
if (ent->err_raddr == ~(u64)0)
sun4v_report_real_raddr(pfx, regs);
}
if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
printk("%s: size [0x%x]\n", pfx, ent->err_size);
if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
SUN4V_ERR_ATTRS_INT_REGISTERS |
SUN4V_ERR_ATTRS_FPU_REGISTERS |
SUN4V_ERR_ATTRS_PRIV_REG))
printk("%s: cpu[%u]\n", pfx, ent->err_cpu);
if (attrs & SUN4V_ERR_ATTRS_ASI)
printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);
if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
SUN4V_ERR_ATTRS_FPU_REGISTERS |
SUN4V_ERR_ATTRS_PRIV_REG)) &&
(ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
printk("%s: reg [0x%04x]\n",
pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);
show_regs(regs);
if ((cnt = atomic_read(ocnt)) != 0) {
atomic_set(ocnt, 0);
wmb();
printk("%s: Queue overflowed %d times.\n",
pfx, cnt);
}
}
/* Handle a memory corruption detected (MCD) error which is vectored
* in through the resumable error trap.
*/
void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent)
{
if (notify_die(DIE_TRAP, "MCD error", regs, 0, 0x34,
SIGSEGV) == NOTIFY_STOP)
return;
if (regs->tstate & TSTATE_PRIV) {
/* MCD exception could happen because the task was
* running a system call with MCD enabled and passed a
* non-versioned pointer or pointer with bad version
* tag to the system call. In such cases, hypervisor
* places the address of offending instruction in the
* resumable error report. This is a deferred error,
* so the read/write that caused the trap was potentially
* retired a long time ago and we may have no choice
* but to send SIGSEGV to the process.
*/
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (entry) {
/* Looks like a bad syscall parameter */
#ifdef DEBUG_EXCEPTIONS
pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
regs->tpc);
pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
ent.err_raddr, entry->fixup);
#endif
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return;
}
}
/* Send SIGSEGV to the userspace process with the right signal
* code
*/
force_sig_fault(SIGSEGV, SEGV_ADIDERR, (void __user *)ent.err_raddr);
}
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event and clear the first word of the entry.
*/
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
enum ctx_state prev_state = exception_enter();
struct sun4v_error_entry *ent, local_copy;
struct trap_per_cpu *tb;
unsigned long paddr;
int cpu;
cpu = get_cpu();
tb = &trap_block[cpu];
paddr = tb->resum_kernel_buf_pa + offset;
ent = __va(paddr);
memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
/* We have a local copy now, so release the entry. */
ent->err_handle = 0;
wmb();
put_cpu();
if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
/* We should really take the seconds field of
* the error report and use it for the shutdown
* invocation, but for now do the same thing we
* do for a DS shutdown request.
*/
pr_info("Shutdown request, %u seconds...\n",
local_copy.err_secs);
orderly_poweroff(true);
goto out;
}
/* If this is a memory corruption detected error vectored in
* by the HV through the resumable error trap, call the handler.
*/
if (local_copy.err_attrs & SUN4V_ERR_ATTRS_MCD) {
do_mcd_err(regs, local_copy);
return;
}
sun4v_log_error(regs, &local_copy, cpu,
KERN_ERR "RESUMABLE ERROR",
&sun4v_resum_oflow_cnt);
out:
exception_exit(prev_state);
}
/* If we try to printk() we'll probably make matters worse, by trying
* to retake locks this cpu already holds or causing more errors. So
* just bump a counter, and we'll report these counter bumps above.
*/
void sun4v_resum_overflow(struct pt_regs *regs)
{
atomic_inc(&sun4v_resum_oflow_cnt);
}
/* Given a set of registers, get the virtual address that was being
* accessed by the faulting instruction at tpc.
*/
static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
{
unsigned int insn;
if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
return compute_effective_address(regs, insn,
(insn >> 25) & 0x1f);
}
return 0;
}
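/* The (insn >> 25) & 0x1f above extracts bits 29:25 of the SPARC
* instruction word, the rd field, which compute_effective_address()
* takes as its third argument.
*/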
/* Attempt to handle non-resumable errors generated from userspace.
* Returns true if the signal was handled, false otherwise.
*/
bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
struct sun4v_error_entry *ent)
{
unsigned int attrs = ent->err_attrs;
if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
unsigned long addr = ent->err_raddr;
if (addr == ~(u64)0) {
/* This seems highly unlikely to ever occur */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
} else {
unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
PAGE_SIZE);
/* Break the unfortunate news. */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
addr);
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
page_cnt);
while (page_cnt-- > 0) {
if (pfn_valid(addr >> PAGE_SHIFT))
get_page(pfn_to_page(addr >> PAGE_SHIFT));
addr += PAGE_SIZE;
}
}
force_sig(SIGKILL);
return true;
}
if (attrs & SUN4V_ERR_ATTRS_PIO) {
force_sig_fault(SIGBUS, BUS_ADRERR,
(void __user *)sun4v_get_vaddr(regs));
return true;
}
/* Default to doing nothing */
return false;
}
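/* A note on the get_page() calls above: taking a reference that is
* never dropped deliberately pins each damaged page so it is never
* handed out again, the same "yank the page" tactic used by
* cheetah_deferred_handler().
*/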
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event, clear the first word of the entry, and die.
*/
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
struct sun4v_error_entry *ent, local_copy;
struct trap_per_cpu *tb;
unsigned long paddr;
int cpu;
cpu = get_cpu();
tb = &trap_block[cpu];
paddr = tb->nonresum_kernel_buf_pa + offset;
ent = __va(paddr);
memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
/* We have a local copy now, so release the entry. */
ent->err_handle = 0;
wmb();
put_cpu();
if (!(regs->tstate & TSTATE_PRIV) &&
sun4v_nonresum_error_user_handled(regs, &local_copy)) {
/* DON'T PANIC: This userspace error was handled. */
return;
}
#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == cpu) {
pci_poke_faulted = 1;
regs->tpc += 4;
regs->tnpc = regs->tpc + 4;
return;
}
#endif
sun4v_log_error(regs, &local_copy, cpu,
KERN_EMERG "NON-RESUMABLE ERROR",
&sun4v_nonresum_oflow_cnt);
panic("Non-resumable error.");
}
/* If we try to printk() we'll probably make matters worse, by trying
* to retake locks this cpu already holds or causing more errors. So
* just bump a counter, and we'll report these counter bumps above.
*/
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
/* XXX Actually even this may not make much sense. Perhaps
* XXX we should just pull the plug and panic directly from here?
*/
atomic_inc(&sun4v_nonresum_oflow_cnt);
}
static void sun4v_tlb_error(struct pt_regs *regs)
{
die_if_kernel("TLB/TSB error", regs);
}
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
sun4v_err_itlb_pte, sun4v_err_itlb_error);
sun4v_tlb_error(regs);
}
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
sun4v_tlb_error(regs);
}
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
err, op);
}
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
err, op);
}
static void do_fpe_common(struct pt_regs *regs)
{
if (regs->tstate & TSTATE_PRIV) {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
} else {
unsigned long fsr = current_thread_info()->xfsr[0];
int code;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
code = FPE_FLTUNK;
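/* fsr bits 16:14 hold the FP trap type; 1 is IEEE_754_exception, and
* the cexc bits then identify the cause: 0x10 invalid, 0x08 overflow,
* 0x04 underflow, 0x02 divide-by-zero, 0x01 inexact.
*/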
if ((fsr & 0x1c000) == (1 << 14)) {
if (fsr & 0x10)
code = FPE_FLTINV;
else if (fsr & 0x08)
code = FPE_FLTOVF;
else if (fsr & 0x04)
code = FPE_FLTUND;
else if (fsr & 0x02)
code = FPE_FLTDIV;
else if (fsr & 0x01)
code = FPE_FLTRES;
}
force_sig_fault(SIGFPE, code, (void __user *)regs->tpc);
}
}
void do_fpieee(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
0, 0x24, SIGFPE) == NOTIFY_STOP)
goto out;
do_fpe_common(regs);
out:
exception_exit(prev_state);
}
void do_fpother(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
struct fpustate *f = FPUSTATE;
int ret = 0;
if (notify_die(DIE_TRAP, "fpu exception other", regs,
0, 0x25, SIGFPE) == NOTIFY_STOP)
goto out;
switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
case (2 << 14): /* unfinished_FPop */
case (3 << 14): /* unimplemented_FPop */
ret = do_mathemu(regs, f, false);
break;
}
if (ret)
goto out;
do_fpe_common(regs);
out:
exception_exit(prev_state);
}
void do_tof(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
0, 0x26, SIGEMT) == NOTIFY_STOP)
goto out;
if (regs->tstate & TSTATE_PRIV)
die_if_kernel("Penguin overflow trap from kernel mode", regs);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)regs->tpc);
out:
exception_exit(prev_state);
}
void do_div0(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "integer division by zero", regs,
0, 0x28, SIGFPE) == NOTIFY_STOP)
goto out;
if (regs->tstate & TSTATE_PRIV)
die_if_kernel("TL0: Kernel divide by zero.", regs);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->tpc);
out:
exception_exit(prev_state);
}
static void instruction_dump(unsigned int *pc)
{
int i;
if ((((unsigned long) pc) & 3))
return;
printk("Instruction DUMP:");
for (i = -3; i < 6; i++)
printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
printk("\n");
}
static void user_instruction_dump(unsigned int __user *pc)
{
int i;
unsigned int buf[9];
if ((((unsigned long) pc) & 3))
return;
if (copy_from_user(buf, pc - 3, sizeof(buf)))
return;
printk("Instruction DUMP:");
for (i = 0; i < 9; i++)
printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
printk("\n");
}
void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long fp, ksp;
struct thread_info *tp;
int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph = 0;
#endif
ksp = (unsigned long) _ksp;
if (!tsk)
tsk = current;
tp = task_thread_info(tsk);
if (ksp == 0UL) {
if (tsk == current)
asm("mov %%fp, %0" : "=r" (ksp));
else
ksp = tp->ksp;
}
if (tp == current_thread_info())
flushw_all();
fp = ksp + STACK_BIAS;
printk("%sCall Trace:\n", loglvl);
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
unsigned long pc;
if (!kstack_valid(tp, fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
if (kstack_is_trap_frame(tp, regs)) {
if (!(regs->tstate & TSTATE_PRIV))
break;
pc = regs->tpc;
fp = regs->u_regs[UREG_I6] + STACK_BIAS;
} else {
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
print_ip_sym(loglvl, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
struct ftrace_ret_stack *ret_stack;
ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
if (ret_stack) {
pc = ret_stack->ret;
print_ip_sym(loglvl, pc);
graph++;
}
}
#endif
} while (++count < 16);
}
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
unsigned long fp = rw->ins[6];
if (!fp)
return NULL;
return (struct reg_window *) (fp + STACK_BIAS);
}
void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
int count = 0;
/* Amuse the user. */
printk(
" \\|/ ____ \\|/\n"
" \"@'/ .. \\`@\"\n"
" /_| \\__/ |_\\\n"
" \\__U_/\n");
printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
__asm__ __volatile__("flushw");
show_regs(regs);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
if (regs->tstate & TSTATE_PRIV) {
struct thread_info *tp = current_thread_info();
struct reg_window *rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
/* Stop the back trace when we hit userland or we
* find some badly aligned kernel stack.
*/
while (rw &&
count++ < 30 &&
kstack_valid(tp, (unsigned long) rw)) {
printk("Caller[%016lx]: %pS\n", rw->ins[7],
(void *) rw->ins[7]);
rw = kernel_stack_up(rw);
}
instruction_dump ((unsigned int *) regs->tpc);
} else {
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
user_instruction_dump ((unsigned int __user *) regs->tpc);
}
if (panic_on_oops)
panic("Fatal exception");
make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
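/* These match instruction words with op = 2 (bits 31:30) and
* op3 = 0x36 (bits 24:19), the VIS instruction space that vis_emul()
* knows how to handle.
*/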
void do_illegal_instruction(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
if (notify_die(DIE_TRAP, "illegal instruction", regs,
0, 0x10, SIGILL) == NOTIFY_STOP)
goto out;
if (tstate & TSTATE_PRIV)
die_if_kernel("Kernel illegal instruction", regs);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
if (handle_popc(insn, regs))
goto out;
} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
if (handle_ldf_stq(insn, regs))
goto out;
} else if (tlb_type == hypervisor) {
if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
if (!vis_emul(regs, insn))
goto out;
} else {
struct fpustate *f = FPUSTATE;
/* On UltraSPARC T2 and later, FPU insns which
* are not implemented in HW signal an illegal
* instruction trap and do not set the FP Trap
* Type in the %fsr to unimplemented_FPop.
*/
if (do_mathemu(regs, f, true))
goto out;
}
}
}
force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc);
out:
exception_exit(prev_state);
}
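/* Illustrative sketch (a hypothetical helper, not part of the handler):
 * how the VIS_OPCODE_MASK/VIS_OPCODE_VAL pair above classifies a word.
 * Bits 31:30 must be 0x2 (the arithmetic instruction format) and bits
 * 24:19 must be op3 == 0x36, the IMPDEP1 opcode space used by VIS.
 */
static inline int insn_is_vis_candidate(u32 insn)
{
return (insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL;
}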
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "memory address unaligned", regs,
0, 0x34, SIGSEGV) == NOTIFY_STOP)
goto out;
if (regs->tstate & TSTATE_PRIV) {
kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
goto out;
}
if (is_no_fault_exception(regs))
return;
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar);
out:
exception_exit(prev_state);
}
void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
if (notify_die(DIE_TRAP, "memory address unaligned", regs,
0, 0x34, SIGSEGV) == NOTIFY_STOP)
return;
if (regs->tstate & TSTATE_PRIV) {
kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
return;
}
if (is_no_fault_exception(regs))
return;
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) addr);
}
/* sun4v_mem_corrupt_detect_precise() - Handle precise exception on an ADI
* tag mismatch.
*
* ADI version tag mismatch on a load from memory always results in a
* precise exception. Tag mismatch on a store to memory will result in
* precise exception if MCDPER or PMCDPER is set to 1.
*/
void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr,
unsigned long context)
{
if (notify_die(DIE_TRAP, "memory corruption precise exception", regs,
0, 0x8, SIGSEGV) == NOTIFY_STOP)
return;
if (regs->tstate & TSTATE_PRIV) {
/* MCD exception could happen because the task was running
* a system call with MCD enabled and passed a non-versioned
* pointer or a pointer with a bad version tag to the system
* call.
*/
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (entry) {
/* Looks like a bad syscall parameter */
#ifdef DEBUG_EXCEPTIONS
pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
regs->tpc);
pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
regs->tpc, entry->fixup);
#endif
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return;
}
pr_emerg("%s: ADDR[%016lx] CTX[%lx], going.\n",
__func__, addr, context);
die_if_kernel("MCD precise", regs);
}
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault(SIGSEGV, SEGV_ADIPERR, (void __user *)addr);
}
void do_privop(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_TRAP, "privileged operation", regs,
0, 0x11, SIGILL) == NOTIFY_STOP)
goto out;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
force_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)regs->tpc);
out:
exception_exit(prev_state);
}
void do_privact(struct pt_regs *regs)
{
do_privop(regs);
}
/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
exception_enter();
die_if_kernel("TL0: Cache Error Exception", regs);
}
void do_div0_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: DIV0 Exception", regs);
}
void do_fpieee_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: FPU IEEE Exception", regs);
}
void do_fpother_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: FPU Other Exception", regs);
}
void do_ill_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
void do_irq_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: IRQ Exception", regs);
}
void do_lddfmna_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: LDDF Exception", regs);
}
void do_stdfmna_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: STDF Exception", regs);
}
void do_paw(struct pt_regs *regs)
{
exception_enter();
die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
void do_paw_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
void do_vaw(struct pt_regs *regs)
{
exception_enter();
die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
void do_vaw_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
void do_tof_tl1(struct pt_regs *regs)
{
exception_enter();
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
die_if_kernel("TL1: Tag Overflow Exception", regs);
}
void do_getpsr(struct pt_regs *regs)
{
regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
}
u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
/* This can get invoked before sched_init() so play it super safe
* and use hard_smp_processor_id().
*/
void notrace init_cur_cpu_trap(struct thread_info *t)
{
int cpu = hard_smp_processor_id();
struct trap_per_cpu *p = &trap_block[cpu];
p->thread = t;
p->pgd_paddr = 0;
}
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);
/* Only invoked on boot processor. */
void __init trap_init(void)
{
/* Compile time sanity check. */
BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
TI_FLAGS != offsetof(struct thread_info, flags) ||
TI_CPU != offsetof(struct thread_info, cpu) ||
TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
TI_KSP != offsetof(struct thread_info, ksp) ||
TI_FAULT_ADDR != offsetof(struct thread_info,
fault_address) ||
TI_KREGS != offsetof(struct thread_info, kregs) ||
TI_UTRAPS != offsetof(struct thread_info, utraps) ||
TI_REG_WINDOW != offsetof(struct thread_info,
reg_window) ||
TI_RWIN_SPTRS != offsetof(struct thread_info,
rwbuf_stkptrs) ||
TI_GSR != offsetof(struct thread_info, gsr) ||
TI_XFSR != offsetof(struct thread_info, xfsr) ||
TI_PRE_COUNT != offsetof(struct thread_info,
preempt_count) ||
TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
TI_KUNA_REGS != offsetof(struct thread_info,
kern_una_regs) ||
TI_KUNA_INSN != offsetof(struct thread_info,
kern_una_insn) ||
TI_FPREGS != offsetof(struct thread_info, fpregs) ||
(TI_FPREGS & (64 - 1)));
BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
thread) ||
(TRAP_PER_CPU_PGD_PADDR !=
offsetof(struct trap_per_cpu, pgd_paddr)) ||
(TRAP_PER_CPU_CPU_MONDO_PA !=
offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
(TRAP_PER_CPU_DEV_MONDO_PA !=
offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
(TRAP_PER_CPU_RESUM_MONDO_PA !=
offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
(TRAP_PER_CPU_RESUM_KBUF_PA !=
offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
(TRAP_PER_CPU_NONRESUM_MONDO_PA !=
offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
(TRAP_PER_CPU_NONRESUM_KBUF_PA !=
offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
(TRAP_PER_CPU_FAULT_INFO !=
offsetof(struct trap_per_cpu, fault_info)) ||
(TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
(TRAP_PER_CPU_CPU_LIST_PA !=
offsetof(struct trap_per_cpu, cpu_list_pa)) ||
(TRAP_PER_CPU_TSB_HUGE !=
offsetof(struct trap_per_cpu, tsb_huge)) ||
(TRAP_PER_CPU_TSB_HUGE_TEMP !=
offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
(TRAP_PER_CPU_IRQ_WORKLIST_PA !=
offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
(TRAP_PER_CPU_CPU_MONDO_QMASK !=
offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
(TRAP_PER_CPU_DEV_MONDO_QMASK !=
offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
(TRAP_PER_CPU_RESUM_QMASK !=
offsetof(struct trap_per_cpu, resum_qmask)) ||
(TRAP_PER_CPU_NONRESUM_QMASK !=
offsetof(struct trap_per_cpu, nonresum_qmask)) ||
(TRAP_PER_CPU_PER_CPU_BASE !=
offsetof(struct trap_per_cpu, __per_cpu_base)));
BUILD_BUG_ON((TSB_CONFIG_TSB !=
offsetof(struct tsb_config, tsb)) ||
(TSB_CONFIG_RSS_LIMIT !=
offsetof(struct tsb_config, tsb_rss_limit)) ||
(TSB_CONFIG_NENTRIES !=
offsetof(struct tsb_config, tsb_nentries)) ||
(TSB_CONFIG_REG_VAL !=
offsetof(struct tsb_config, tsb_reg_val)) ||
(TSB_CONFIG_MAP_VADDR !=
offsetof(struct tsb_config, tsb_map_vaddr)) ||
(TSB_CONFIG_MAP_PTE !=
offsetof(struct tsb_config, tsb_map_pte)));
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/
mmgrab(&init_mm);
current->active_mm = &init_mm;
}
| linux-master | arch/sparc/kernel/traps_64.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
#include "kernel.h"
static unsigned int dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned int read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned int write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned int chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned int signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_arch(int arch)
{
#ifdef CONFIG_COMPAT
if (arch == AUDIT_ARCH_SPARC)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned int syscall)
{
#ifdef CONFIG_COMPAT
if (abi == AUDIT_ARCH_SPARC)
return sparc32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return AUDITSC_OPEN;
case __NR_openat:
return AUDITSC_OPENAT;
case __NR_socketcall:
return AUDITSC_SOCKETCALL;
case __NR_execve:
return AUDITSC_EXECVE;
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
return AUDITSC_NATIVE;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_COMPAT
audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, sparc32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, sparc32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);
| linux-master | arch/sparc/kernel/audit.c |
// SPDX-License-Identifier: GPL-2.0
/* devices.c: Initial scan of the prom device tree for important
* Sparc device nodes which we need to find.
*
* This is based on the sparc64 version, but sun4m doesn't always use
* the hardware MIDs, so be careful.
*
* Copyright (C) 1996 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/cpudata.h>
#include <asm/cpu_type.h>
#include <asm/setup.h>
#include "kernel.h"
static char *cpu_mid_prop(void)
{
if (sparc_cpu_model == sun4d)
return "cpu-id";
return "mid";
}
static int check_cpu_node(phandle nd, int *cur_inst,
int (*compare)(phandle, int, void *), void *compare_arg,
phandle *prom_node, int *mid)
{
if (!compare(nd, *cur_inst, compare_arg)) {
if (prom_node)
*prom_node = nd;
if (mid) {
*mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
if (sparc_cpu_model == sun4m)
*mid &= 3;
}
return 0;
}
(*cur_inst)++;
return -ENODEV;
}
static int __cpu_find_by(int (*compare)(phandle, int, void *),
void *compare_arg, phandle *prom_node, int *mid)
{
struct device_node *dp;
int cur_inst;
cur_inst = 0;
for_each_node_by_type(dp, "cpu") {
int err = check_cpu_node(dp->phandle, &cur_inst,
compare, compare_arg,
prom_node, mid);
if (!err) {
of_node_put(dp);
return 0;
}
}
return -ENODEV;
}
static int cpu_instance_compare(phandle nd, int instance, void *_arg)
{
int desired_instance = (int) _arg;
if (instance == desired_instance)
return 0;
return -ENODEV;
}
int cpu_find_by_instance(int instance, phandle *prom_node, int *mid)
{
return __cpu_find_by(cpu_instance_compare, (void *)instance,
prom_node, mid);
}
static int cpu_mid_compare(phandle nd, int instance, void *_arg)
{
int desired_mid = (int) _arg;
int this_mid;
this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
if (this_mid == desired_mid
|| (sparc_cpu_model == sun4m && (this_mid & 3) == desired_mid))
return 0;
return -ENODEV;
}
int cpu_find_by_mid(int mid, phandle *prom_node)
{
return __cpu_find_by(cpu_mid_compare, (void *)mid,
prom_node, NULL);
}
/* sun4m uses truncated mids since we base the cpuid on the ttable/irqset
* address (0-3). This gives us the true hardware mid, which might have
* some other bits set. On sun4d, hardware and software mids are the same.
*/
int cpu_get_hwmid(phandle prom_node)
{
return prom_getintdefault(prom_node, cpu_mid_prop(), -ENODEV);
}
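/* Worked example (illustrative): on sun4m only the low two bits of the
 * hardware mid form the cpuid, so a hwmid of 0x09 corresponds to cpu 1.
 * A hypothetical helper making the truncation explicit:
 */
static inline int sun4m_hwmid_to_cpuid(int hwmid)
{
return hwmid & 3;
}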
void __init device_scan(void)
{
printk(KERN_NOTICE "Booting Linux...\n");
#ifndef CONFIG_SMP
{
phandle cpu_node;
int err;
err = cpu_find_by_instance(0, &cpu_node, NULL);
if (err) {
/* Probably a sun4e, Sun is trying to trick us ;-) */
prom_printf("No cpu nodes, cannot continue\n");
prom_halt();
}
cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency",
0);
}
#endif /* !CONFIG_SMP */
auxio_probe();
auxio_power_probe();
}
| linux-master | arch/sparc/kernel/devices.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "of_device_common.h"
#include "irq.h"
/*
* PCI bus specific translator
*/
static int of_bus_pci_match(struct device_node *np)
{
if (of_node_is_type(np, "pci") || of_node_is_type(np, "pciex")) {
/* Do not do PCI specific frobbing if the
* PCI bridge lacks a ranges property. We
* want to pass it through up to the next
* parent as-is, not with the PCI translate
* method which chops off the top address cell.
*/
if (!of_property_present(np, "ranges"))
return 0;
return 1;
}
return 0;
}
static void of_bus_pci_count_cells(struct device_node *np,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 3;
if (sizec)
*sizec = 2;
}
static int of_bus_pci_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
u32 result[OF_MAX_ADDR_CELLS];
int i;
/* Check address type match */
if ((addr[0] ^ range[0]) & 0x03000000)
return -EINVAL;
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
/* Start with the parent range base. */
memcpy(result, range + na, pna * 4);
/* Add in the child address offset, skipping high cell. */
for (i = 0; i < na - 1; i++)
result[pna - 1 - i] +=
(addr[na - 1 - i] -
range[na - 1 - i]);
memcpy(addr, result, pna * 4);
return 0;
}
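/* Note (illustrative): the (addr[0] ^ range[0]) & 0x03000000 test above
 * compares the "ss" space code of phys.hi in the standard OF PCI address
 * encoding (00 config, 01 I/O, 10 mem32, 11 mem64), so a child address
 * only matches a range of the same space type.
 */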
static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
{
u32 w = addr[0];
/* For PCI, we override whatever child busses may have used. */
flags = 0;
switch((w >> 24) & 0x03) {
case 0x01:
flags |= IORESOURCE_IO;
break;
case 0x02: /* 32 bits */
case 0x03: /* 64 bits */
flags |= IORESOURCE_MEM;
break;
}
if (w & 0x40000000)
flags |= IORESOURCE_PREFETCH;
return flags;
}
static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
{
return IORESOURCE_MEM;
}
/*
* AMBAPP bus specific translator
*/
static int of_bus_ambapp_match(struct device_node *np)
{
return of_node_is_type(np, "ambapp");
}
static void of_bus_ambapp_count_cells(struct device_node *child,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 1;
if (sizec)
*sizec = 1;
}
static int of_bus_ambapp_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
return of_bus_default_map(addr, range, na, ns, pna);
}
static unsigned long of_bus_ambapp_get_flags(const u32 *addr,
unsigned long flags)
{
return IORESOURCE_MEM;
}
/*
* Array of bus specific translators
*/
static struct of_bus of_busses[] = {
/* PCI */
{
.name = "pci",
.addr_prop_name = "assigned-addresses",
.match = of_bus_pci_match,
.count_cells = of_bus_pci_count_cells,
.map = of_bus_pci_map,
.get_flags = of_bus_pci_get_flags,
},
/* SBUS */
{
.name = "sbus",
.addr_prop_name = "reg",
.match = of_bus_sbus_match,
.count_cells = of_bus_sbus_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_sbus_get_flags,
},
/* AMBA */
{
.name = "ambapp",
.addr_prop_name = "reg",
.match = of_bus_ambapp_match,
.count_cells = of_bus_ambapp_count_cells,
.map = of_bus_ambapp_map,
.get_flags = of_bus_ambapp_get_flags,
},
/* Default */
{
.name = "default",
.addr_prop_name = "reg",
.match = NULL,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
};
static struct of_bus *of_match_bus(struct device_node *np)
{
int i;
for (i = 0; i < ARRAY_SIZE(of_busses); i++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
BUG();
return NULL;
}
static int __init build_one_resource(struct device_node *parent,
struct of_bus *bus,
struct of_bus *pbus,
u32 *addr,
int na, int ns, int pna)
{
const u32 *ranges;
unsigned int rlen;
int rone;
ranges = of_get_property(parent, "ranges", &rlen);
if (ranges == NULL || rlen == 0) {
u32 result[OF_MAX_ADDR_CELLS];
int i;
memset(result, 0, pna * 4);
for (i = 0; i < na; i++)
result[pna - 1 - i] =
addr[na - 1 - i];
memcpy(addr, result, pna * 4);
return 0;
}
/* Now walk through the ranges */
rlen /= 4;
rone = na + pna + ns;
for (; rlen >= rone; rlen -= rone, ranges += rone) {
if (!bus->map(addr, ranges, na, ns, pna))
return 0;
}
return 1;
}
static int __init use_1to1_mapping(struct device_node *pp)
{
/* If we have a ranges property in the parent, use it. */
if (of_property_present(pp, "ranges"))
return 0;
/* Some SBUS devices use intermediate nodes to express
* hierarchy within the device itself. These aren't
* real bus nodes, and don't have a 'ranges' property.
* But, we should still pass the translation work up
* to the SBUS itself.
*/
if (of_node_name_eq(pp, "dma") ||
of_node_name_eq(pp, "espdma") ||
of_node_name_eq(pp, "ledma") ||
of_node_name_eq(pp, "lebuffer"))
return 0;
return 1;
}
static int of_resource_verbose;
static void __init build_device_resources(struct platform_device *op,
struct device *parent)
{
struct platform_device *p_op;
struct of_bus *bus;
int na, ns;
int index, num_reg;
const void *preg;
if (!parent)
return;
p_op = to_platform_device(parent);
bus = of_match_bus(p_op->dev.of_node);
bus->count_cells(op->dev.of_node, &na, &ns);
preg = of_get_property(op->dev.of_node, bus->addr_prop_name, &num_reg);
if (!preg || num_reg == 0)
return;
/* Convert to num-cells. */
num_reg /= 4;
/* Convert to num-entries. */
num_reg /= na + ns;
op->resource = op->archdata.resource;
op->num_resources = num_reg;
for (index = 0; index < num_reg; index++) {
struct resource *r = &op->resource[index];
u32 addr[OF_MAX_ADDR_CELLS];
const u32 *reg = (preg + (index * ((na + ns) * 4)));
struct device_node *dp = op->dev.of_node;
struct device_node *pp = p_op->dev.of_node;
struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
int dna, dns;
int pna, pns;
size = of_read_addr(reg + na, ns);
memcpy(addr, reg, na * 4);
flags = bus->get_flags(reg, 0);
if (use_1to1_mapping(pp)) {
result = of_read_addr(addr, na);
goto build_res;
}
dna = na;
dns = ns;
dbus = bus;
while (1) {
dp = pp;
pp = dp->parent;
if (!pp) {
result = of_read_addr(addr, dna);
break;
}
pbus = of_match_bus(pp);
pbus->count_cells(dp, &pna, &pns);
if (build_one_resource(dp, dbus, pbus, addr,
dna, dns, pna))
break;
flags = pbus->get_flags(addr, flags);
dna = pna;
dns = pns;
dbus = pbus;
}
build_res:
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
printk("%pOF reg[%d] -> %llx\n",
op->dev.of_node, index,
result);
if (result != OF_BAD_ADDR) {
r->start = result & 0xffffffff;
r->end = result + size - 1;
r->flags = flags | ((result >> 32ULL) & 0xffUL);
}
r->name = op->dev.of_node->full_name;
}
}
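/* Worked example (illustrative): translated physical addresses here can
 * exceed 32 bits, so a result such as 0x3f0000000 is stored above as
 * r->start == 0xf0000000 with the high bits (0x3) stashed in the low
 * byte of r->flags by the final shift-and-mask.
 */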
static struct platform_device * __init scan_one_device(struct device_node *dp,
struct device *parent)
{
struct platform_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
const struct linux_prom_irqs *intr;
struct dev_archdata *sd;
int len, i;
if (!op)
return NULL;
sd = &op->dev.archdata;
sd->op = op;
op->dev.of_node = dp;
intr = of_get_property(dp, "intr", &len);
if (intr) {
op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
for (i = 0; i < op->archdata.num_irqs; i++)
op->archdata.irqs[i] =
sparc_config.build_device_irq(op, intr[i].pri);
} else {
const unsigned int *irq =
of_get_property(dp, "interrupts", &len);
if (irq) {
op->archdata.num_irqs = len / sizeof(unsigned int);
for (i = 0; i < op->archdata.num_irqs; i++)
op->archdata.irqs[i] =
sparc_config.build_device_irq(op, irq[i]);
} else {
op->archdata.num_irqs = 0;
}
}
build_device_resources(op, parent);
op->dev.parent = parent;
op->dev.bus = &platform_bus_type;
if (!parent)
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->phandle);
op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
op->dev.dma_mask = &op->dev.coherent_dma_mask;
if (of_device_register(op)) {
printk("%pOF: Could not register of device.\n", dp);
kfree(op);
op = NULL;
}
return op;
}
static void __init scan_tree(struct device_node *dp, struct device *parent)
{
while (dp) {
struct platform_device *op = scan_one_device(dp, parent);
if (op)
scan_tree(dp->child, &op->dev);
dp = dp->sibling;
}
}
static int __init scan_of_devices(void)
{
struct device_node *root = of_find_node_by_path("/");
struct platform_device *parent;
parent = scan_one_device(root, NULL);
if (!parent)
return 0;
scan_tree(root->child, &parent->dev);
return 0;
}
postcore_initcall(scan_of_devices);
static int __init of_debug(char *str)
{
int val = 0;
get_option(&str, &val);
if (val & 1)
of_resource_verbose = 1;
return 1;
}
__setup("of_debug=", of_debug);
| linux-master | arch/sparc/kernel/of_device_32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller ([email protected])
* Copyright (C) 1997 Jakub Jelinek ([email protected])
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <uapi/linux/mount.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>
#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif
#include "entry.h"
#include "kernel.h"
/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
* operations in asm/ns87303.h
*/
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
0, /* unused */
0, /* orig-video-page */
0, /* orig-video-mode */
128, /* orig-video-cols */
0, 0, 0, /* unused, ega_bx, unused */
54, /* orig-video-lines */
0, /* orig-video-isVGA */
16 /* orig-video-points */
};
static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
prom_write(s, n);
}
/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;
static struct console prom_early_console = {
.name = "earlyprom",
.write = prom_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
.index = -1,
};
/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
static void __init process_switch(char c)
{
switch (c) {
case 'd':
case 's':
break;
case 'h':
prom_printf("boot_flags_init: Halt!\n");
prom_halt();
break;
case 'p':
prom_early_console.flags &= ~CON_BOOT;
break;
case 'P':
/* Force UltraSPARC-III P-Cache on. */
if (tlb_type != cheetah) {
printk("BOOT: Ignoring P-Cache force option.\n");
break;
}
cheetah_pcache_forced_on = 1;
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
cheetah_enable_pcache();
break;
default:
printk("Unknown boot switch (-%c)\n", c);
break;
}
}
static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
if (*commands == '\0')
break;
if (*commands == '-') {
commands++;
while (*commands && *commands != ' ')
process_switch(*commands++);
continue;
}
if (!strncmp(commands, "mem=", 4))
cmdline_memory_size = memparse(commands + 4, &commands);
while (*commands && *commands != ' ')
commands++;
}
}
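/* Worked example (illustrative): memparse() understands K/M/G suffixes,
 * so booting with "mem=512M" leaves cmdline_memory_size == 512 << 20,
 * and "mem=1G" leaves it at 1 << 30.
 */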
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
static void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
unsigned long ver;
int is_jbus;
if (tlb_type == spitfire && !this_is_starfire)
return;
is_jbus = 0;
if (tlb_type != hypervisor) {
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID);
}
p = &__cpuid_patch;
while (p < &__cpuid_patch_end) {
unsigned long addr = p->addr;
unsigned int *insns;
switch (tlb_type) {
case spitfire:
insns = &p->starfire[0];
break;
case cheetah:
case cheetah_plus:
if (is_jbus)
insns = &p->cheetah_jbus[0];
else
insns = &p->cheetah_safari[0];
break;
case hypervisor:
insns = &p->sun4v[0];
break;
default:
prom_printf("Unknown cpu type, halting.\n");
prom_halt();
}
*(unsigned int *) (addr + 0) = insns[0];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
*(unsigned int *) (addr + 4) = insns[1];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 4));
*(unsigned int *) (addr + 8) = insns[2];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 8));
*(unsigned int *) (addr + 12) = insns[3];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 12));
p++;
}
}
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
struct sun4v_1insn_patch_entry *end)
{
while (start < end) {
unsigned long addr = start->addr;
*(unsigned int *) (addr + 0) = start->insn;
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
start++;
}
}
void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
struct sun4v_2insn_patch_entry *end)
{
while (start < end) {
unsigned long addr = start->addr;
*(unsigned int *) (addr + 0) = start->insns[0];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
*(unsigned int *) (addr + 4) = start->insns[1];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 4));
start++;
}
}
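/* The stores above all follow one idiom: write a 32-bit instruction,
 * order the store with wmb(), then "flush" the address so the I-cache
 * observes the new text. A hypothetical helper capturing the pattern
 * (sketch only, not in the tree):
 */
static void patch_one_insn(unsigned long addr, unsigned int insn)
{
*(unsigned int *) addr = insn;
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr));
}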
void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
struct sun4v_2insn_patch_entry *end)
{
while (start < end) {
unsigned long addr = start->addr;
*(unsigned int *) (addr + 0) = start->insns[0];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
*(unsigned int *) (addr + 4) = start->insns[1];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 4));
start++;
}
}
static void __init sun4v_patch(void)
{
extern void sun4v_hvapi_init(void);
if (tlb_type != hypervisor)
return;
sun4v_patch_1insn_range(&__sun4v_1insn_patch,
&__sun4v_1insn_patch_end);
sun4v_patch_2insn_range(&__sun4v_2insn_patch,
&__sun4v_2insn_patch_end);
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
case SUN4V_CHIP_SPARC_M8:
case SUN4V_CHIP_SPARC_SN:
sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
&__sun_m7_1insn_patch_end);
sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
&__sun_m7_2insn_patch_end);
break;
default:
break;
}
if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
&__fast_win_ctrl_1insn_patch_end);
}
sun4v_hvapi_init();
}
static void __init popc_patch(void)
{
struct popc_3insn_patch_entry *p3;
struct popc_6insn_patch_entry *p6;
p3 = &__popc_3insn_patch;
while (p3 < &__popc_3insn_patch_end) {
unsigned long i, addr = p3->addr;
for (i = 0; i < 3; i++) {
*(unsigned int *) (addr + (i * 4)) = p3->insns[i];
wmb();
__asm__ __volatile__("flush %0"
: : "r" (addr + (i * 4)));
}
p3++;
}
p6 = &__popc_6insn_patch;
while (p6 < &__popc_6insn_patch_end) {
unsigned long i, addr = p6->addr;
for (i = 0; i < 6; i++) {
*(unsigned int *) (addr + (i * 4)) = p6->insns[i];
wmb();
__asm__ __volatile__("flush %0"
: : "r" (addr + (i * 4)));
}
p6++;
}
}
static void __init pause_patch(void)
{
struct pause_patch_entry *p;
p = &__pause_3insn_patch;
while (p < &__pause_3insn_patch_end) {
unsigned long i, addr = p->addr;
for (i = 0; i < 3; i++) {
*(unsigned int *) (addr + (i * 4)) = p->insns[i];
wmb();
__asm__ __volatile__("flush %0"
: : "r" (addr + (i * 4)));
}
p++;
}
}
void __init start_early_boot(void)
{
int cpu;
check_if_starfire();
per_cpu_patch();
sun4v_patch();
smp_init_cpu_poke();
cpu = hard_smp_processor_id();
if (cpu >= NR_CPUS) {
prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
cpu, NR_CPUS);
prom_halt();
}
current_thread_info()->cpu = cpu;
time_init_early();
prom_init_report();
start_kernel();
}
/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);
static const char *hwcaps[] = {
"flush", "stbar", "swap", "muldiv", "v9",
"ultra3", "blkinit", "n2",
/* These strings are as they appear in the machine description
* 'hwcap-list' property for cpu nodes.
*/
"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
"adp",
};
static const char *crypto_hwcaps[] = {
"aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
"sha512", "mpmul", "montmul", "montsqr", "crc32c",
};
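/* Note (illustrative): hwcaps[] is ordered so that index i corresponds
 * to capability bit (1UL << i); e.g. "v9" at index 4 matches
 * HWCAP_SPARC_V9 (0x10). The reporting loops below rely on exactly
 * this correspondence when walking the mask.
 */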
void cpucap_info(struct seq_file *m)
{
unsigned long caps = sparc64_elf_hwcap;
int i, printed = 0;
seq_puts(m, "cpucaps\t\t: ");
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
if (hwcaps[i] && (caps & bit)) {
seq_printf(m, "%s%s",
printed ? "," : "", hwcaps[i]);
printed++;
}
}
if (caps & HWCAP_SPARC_CRYPTO) {
unsigned long cfr;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
unsigned long bit = 1UL << i;
if (cfr & bit) {
seq_printf(m, "%s%s",
printed ? "," : "", crypto_hwcaps[i]);
printed++;
}
}
}
seq_putc(m, '\n');
}
static void __init report_one_hwcap(int *printed, const char *name)
{
if ((*printed) == 0)
printk(KERN_INFO "CPU CAPS: [");
printk(KERN_CONT "%s%s",
(*printed) ? "," : "", name);
if (++(*printed) == 8) {
printk(KERN_CONT "]\n");
*printed = 0;
}
}
static void __init report_crypto_hwcaps(int *printed)
{
unsigned long cfr;
int i;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
unsigned long bit = 1UL << i;
if (cfr & bit)
report_one_hwcap(printed, crypto_hwcaps[i]);
}
}
static void __init report_hwcaps(unsigned long caps)
{
int i, printed = 0;
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
if (hwcaps[i] && (caps & bit))
report_one_hwcap(&printed, hwcaps[i]);
}
if (caps & HWCAP_SPARC_CRYPTO)
report_crypto_hwcaps(&printed);
if (printed != 0)
printk(KERN_CONT "]\n");
}
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
struct mdesc_handle *hp;
unsigned long caps = 0;
const char *prop;
int len;
u64 pn;
hp = mdesc_grab();
if (!hp)
return 0;
pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
if (pn == MDESC_NODE_NULL)
goto out;
prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
if (!prop)
goto out;
while (len) {
int i, plen;
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
caps |= bit;
break;
}
}
for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
if (!strcmp(prop, crypto_hwcaps[i]))
caps |= HWCAP_SPARC_CRYPTO;
}
plen = strlen(prop) + 1;
prop += plen;
len -= plen;
}
out:
mdesc_release(hp);
return caps;
}
/* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
*/
static void __init init_sparc64_elf_hwcap(void)
{
unsigned long cap = sparc64_elf_hwcap;
unsigned long mdesc_caps;
if (tlb_type == cheetah || tlb_type == cheetah_plus)
cap |= HWCAP_SPARC_ULTRA3;
else if (tlb_type == hypervisor) {
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_N2;
}
cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);
mdesc_caps = mdesc_cpu_hwcap_list();
if (!mdesc_caps) {
if (tlb_type == spitfire)
cap |= AV_SPARC_VIS;
if (tlb_type == cheetah || tlb_type == cheetah_plus)
cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
if (tlb_type == cheetah_plus) {
unsigned long impl, ver;
__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
impl = ((ver >> 32) & 0xffff);
if (impl == PANTHER_IMPL)
cap |= AV_SPARC_POPC;
}
if (tlb_type == hypervisor) {
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
cap |= AV_SPARC_ASI_BLK_INIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
AV_SPARC_POPC);
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
}
}
sparc64_elf_hwcap = cap | mdesc_caps;
report_hwcaps(sparc64_elf_hwcap);
if (sparc64_elf_hwcap & AV_SPARC_POPC)
popc_patch();
if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
pause_patch();
}
void __init alloc_irqstack_bootmem(void)
{
unsigned int i, node;
for_each_possible_cpu(i) {
node = cpu_to_node(i);
softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
THREAD_SIZE, node);
if (!softirq_stack[i])
panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
__func__, THREAD_SIZE, THREAD_SIZE, node);
hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
THREAD_SIZE, node);
if (!hardirq_stack[i])
panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
__func__, THREAD_SIZE, THREAD_SIZE, node);
}
}
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
parse_early_param();
boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
if (btext_find_display())
#endif
register_console(&prom_early_console);
if (tlb_type == hypervisor)
pr_info("ARCH: SUN4V\n");
else
pr_info("ARCH: SUN4U\n");
idprom_init();
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
#endif
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
phandle chosen = prom_finddevice("/chosen");
u32 cl, sv, gw;
cl = prom_getintdefault (chosen, "client-ip", 0);
sv = prom_getintdefault (chosen, "server-ip", 0);
gw = prom_getintdefault (chosen, "gateway-ip", 0);
if (cl && sv) {
ic_myaddr = cl;
ic_servaddr = sv;
if (gw)
ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
ic_proto_enabled = 0;
#endif
}
}
#endif
/* Get boot processor trap_block[] setup. */
init_cur_cpu_trap(current_thread_info());
paging_init();
init_sparc64_elf_hwcap();
smp_fill_in_cpu_possible_map();
/*
* Once the OF device tree and MDESC have been setup and nr_cpus has
* been parsed, we know the list of possible cpus. Therefore we can
* allocate the IRQ stacks.
*/
alloc_irqstack_bootmem();
}
extern int stop_a_enabled;
void sun_do_break(void)
{
if (!stop_a_enabled)
return;
prom_printf("\n");
flush_user_windows();
prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);
int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);
| linux-master | arch/sparc/kernel/setup_64.c |
// SPDX-License-Identifier: GPL-2.0
/* pci_fire.c: Sun4u platform PCI-E controller support.
*
* Copyright (C) 2007 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/numa.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include "pci_impl.h"
#define DRIVER_NAME "fire"
#define PFX DRIVER_NAME ": "
#define FIRE_IOMMU_CONTROL 0x40000UL
#define FIRE_IOMMU_TSBBASE 0x40008UL
#define FIRE_IOMMU_FLUSH 0x40100UL
#define FIRE_IOMMU_FLUSHINV 0x40108UL
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
u32 vdma[2], dma_mask;
u64 control;
int tsbsize, err;
/* No virtual-dma property on these guys, use largest size. */
vdma[0] = 0xc0000000; /* base */
vdma[1] = 0x40000000; /* size */
dma_mask = 0xffffffff;
tsbsize = 128;
/* Register addresses. */
iommu->iommu_control = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;
/* We use the main control/status register of FIRE as the write
* completion register.
*/
iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;
/*
* Invalidate TLB Entries.
*/
upa_writeq(~(u64)0, iommu->iommu_flushinv);
err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
pbm->numa_node);
if (err)
return err;
upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);
control = upa_readq(iommu->iommu_control);
control |= (0x00000400 /* TSB cache snoop enable */ |
0x00000300 /* Cache mode */ |
0x00000002 /* Bypass enable */ |
0x00000001 /* Translation enable */);
upa_writeq(control, iommu->iommu_control);
return 0;
}
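/* Worked numbers (illustrative): tsbsize * 8 * 1024 is 1 MiB of table;
 * at 8 bytes per IOPTE that is 128K entries, and with 8 KiB IO pages
 * those exactly span the 0x40000000 (1 GiB) virtual-DMA window set up
 * above.
 */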
#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
u64 word0;
#define MSIQ_WORD0_RESV 0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT 56
#define MSIQ_WORD0_LEN 0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT 46
#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT 32
#define MSIQ_WORD0_RID 0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT 16
#define MSIQ_WORD0_DATA0 0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT 0
#define MSIQ_TYPE_MSG 0x6
#define MSIQ_TYPE_MSI32 0xb
#define MSIQ_TYPE_MSI64 0xf
u64 word1;
#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT 16
#define MSIQ_WORD1_DATA1 0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT 0
u64 resv[6];
};
/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL
#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL
#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL
#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL
#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID 0x8000000000000000UL
#define MSI_MAP_EQWR_N 0x4000000000000000UL
#define MSI_MAP_EQNUM 0x000000000000003fUL
#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N 0x4000000000000000UL
#define IMONDO_DATA0 0x02C000UL
#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL
#define IMONDO_DATA1 0x02C008UL
#define IMONDO_DATA1_DATA 0xffffffffffffffffUL
#define MSI_32BIT_ADDR 0x034000UL
#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL
#define MSI_64BIT_ADDR 0x034008UL
#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL
static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head)
{
*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
return 0;
}
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head, unsigned long *msi)
{
unsigned long type_fmt, type, msi_num;
struct pci_msiq_entry *base, *ep;
base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
ep = &base[*head];
if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
return 0;
type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
MSIQ_WORD0_FMT_TYPE_SHIFT);
type = (type_fmt >> 3);
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
return -EINVAL;
*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
MSIQ_WORD0_DATA0_SHIFT);
upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));
/* Clear the entry. */
ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
/* Go to next entry in ring. */
(*head)++;
if (*head >= pbm->msiq_ent_count)
*head = 0;
return 1;
}
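/* Sketch of the ring arithmetic above (illustrative): the head index
 * wraps modulo msiq_ent_count, so with e.g. 128 entries a head of 127
 * advances to 0 and the queue is consumed circularly.
 */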
static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head)
{
upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
return 0;
}
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64)
{
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val &= ~(MSI_MAP_EQNUM);
val |= msiqid;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val |= MSI_MAP_VALID;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
return 0;
}
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val &= ~MSI_MAP_VALID;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
return 0;
}
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long pages, order, i;
order = get_order(512 * 1024);
pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
if (pages == 0UL) {
printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
order);
return -ENOMEM;
}
memset((char *)pages, 0, PAGE_SIZE << order);
pbm->msi_queues = (void *) pages;
upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
__pa(pbm->msi_queues)),
pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);
upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);
upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);
for (i = 0; i < pbm->msiq_num; i++) {
upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
}
return 0;
}
static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
unsigned long pages, order;
order = get_order(512 * 1024);
pages = (unsigned long) pbm->msi_queues;
free_pages(pages, order);
pbm->msi_queues = NULL;
}
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
unsigned int irq;
int fixup;
u64 val;
imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
/* XXX iterate amongst the 4 IRQ controllers XXX */
int_ctrlr = (1UL << 6);
val = upa_readq(imap_reg);
val |= (1UL << 63) | int_ctrlr;
upa_writeq(val, imap_reg);
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
irq = build_irq(fixup, iclr_reg, imap_reg);
if (!irq)
return -ENOMEM;
upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
return irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
.get_head = pci_fire_get_head,
.dequeue_msi = pci_fire_dequeue_msi,
.set_head = pci_fire_set_head,
.msi_setup = pci_fire_msi_setup,
.msi_teardown = pci_fire_msi_teardown,
.msiq_alloc = pci_fire_msiq_alloc,
.msiq_free = pci_fire_msiq_free,
.msiq_build_irq = pci_fire_msiq_build_irq,
};
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL 0x470010UL
#define FIRE_PARITY_ENAB 0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL 0x471028UL
#define FIRE_FATAL_RESET_SPARE 0x0000000004000000UL
#define FIRE_FATAL_RESET_MB 0x0000000002000000UL
#define FIRE_FATAL_RESET_CPE 0x0000000000008000UL
#define FIRE_FATAL_RESET_APE 0x0000000000004000UL
#define FIRE_FATAL_RESET_PIO 0x0000000000000040UL
#define FIRE_FATAL_RESET_JW 0x0000000000000004UL
#define FIRE_FATAL_RESET_JI 0x0000000000000002UL
#define FIRE_FATAL_RESET_JR 0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE 0x471800UL
/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL 0x80000UL
#define FIRE_TLU_CTRL_TIM 0x00000000da000000UL
#define FIRE_TLU_CTRL_QDET 0x0000000000000100UL
#define FIRE_TLU_CTRL_CFG 0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL 0x90008UL
#define FIRE_TLU_LINK_CTRL 0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK 0x0000000000000040UL
#define FIRE_LPU_RESET 0xe2008UL
#define FIRE_LPU_LLCFG 0xe2200UL
#define FIRE_LPU_LLCFG_VC0 0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL 0xe2240UL
#define FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL
#define FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP 0xe2430UL
#define FIRE_LPU_LTSSM_CFG2 0xe2788UL
#define FIRE_LPU_LTSSM_CFG3 0xe2790UL
#define FIRE_LPU_LTSSM_CFG4 0xe2798UL
#define FIRE_LPU_LTSSM_CFG5 0xe27a0UL
#define FIRE_DMC_IENAB 0x31800UL
#define FIRE_DMC_DBG_SEL_A 0x53000UL
#define FIRE_DMC_DBG_SEL_B 0x53008UL
#define FIRE_PEC_IENAB 0x51800UL
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
u64 val;
upa_writeq(FIRE_PARITY_ENAB,
pbm->controller_regs + FIRE_PARITY_CONTROL);
upa_writeq((FIRE_FATAL_RESET_SPARE |
FIRE_FATAL_RESET_MB |
FIRE_FATAL_RESET_CPE |
FIRE_FATAL_RESET_APE |
FIRE_FATAL_RESET_PIO |
FIRE_FATAL_RESET_JW |
FIRE_FATAL_RESET_JI |
FIRE_FATAL_RESET_JR),
pbm->controller_regs + FIRE_FATAL_RESET_CTL);
upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);
val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
val |= (FIRE_TLU_CTRL_TIM |
FIRE_TLU_CTRL_QDET |
FIRE_TLU_CTRL_CFG);
upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
pbm->pbm_regs + FIRE_TLU_LINK_CTRL);
upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
upa_writeq(((0xffff << 16) | (0x0000 << 0)),
pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
upa_writeq((2 << 16) | (140 << 8),
pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);
upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);
upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}
static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, u32 portid)
{
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
int err;
pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 12;
pbm->index = pci_num_pbms++;
pbm->portid = portid;
pbm->op = op;
pbm->name = dp->full_name;
regs = of_get_property(dp, "reg", NULL);
pbm->pbm_regs = regs[0].phys_addr;
pbm->controller_regs = regs[1].phys_addr - 0x410000UL;
printk("%s: SUN4U PCIE Bus Module\n", pbm->name);
pci_determine_mem_io_space(pbm);
pci_get_pbm_props(pbm);
pci_fire_hw_init(pbm);
err = pci_fire_pbm_iommu_init(pbm);
if (err)
return err;
pci_fire_msi_init(pbm);
pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);
/* XXX register error interrupt handlers XXX */
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
return 0;
}
static int fire_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
u32 portid;
int err;
portid = of_getintprop_default(dp, "portid", 0xff);
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
goto out_err;
}
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
goto out_free_controller;
}
pbm->iommu = iommu;
err = pci_fire_pbm_init(pbm, op, portid);
if (err)
goto out_free_iommu;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id fire_match[] = {
{
.name = "pci",
.compatible = "pciex108e,80f0",
},
{},
};
static struct platform_driver fire_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fire_match,
},
.probe = fire_probe,
};
static int __init fire_init(void)
{
return platform_driver_register(&fire_driver);
}
subsys_initcall(fire_init);
| linux-master | arch/sparc/kernel/pci_fire.c |
// SPDX-License-Identifier: GPL-2.0
/* Performance event support for sparc64.
*
* Copyright (C) 2009, 2010 David S. Miller <[email protected]>
*
* This code is based almost entirely upon the x86 perf event
* code, which is:
*
* Copyright (C) 2008 Thomas Gleixner <[email protected]>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/sched/clock.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>
#include "kernel.h"
#include "kstack.h"
/* Two classes of sparc64 chips currently exist. All of which have
* 32-bit counters which can generate overflow interrupts on the
* transition from 0xffffffff to 0.
*
* All chips up to and including SPARC-T3 have two performance
* counters. The two 32-bit counters are accessed in one go using a
* single 64-bit register.
*
* On these older chips both counters are controlled using a single
* control register. The only way to stop all sampling is to clear
* all of the context (user, supervisor, hypervisor) sampling enable
* bits. But these bits apply to both counters, thus the two counters
* can't be enabled/disabled individually.
*
* Furthermore, the control register on these older chips has two
* event fields, one for each of the two counters. It's thus nearly
* impossible to have one counter going while keeping the other one
* stopped. Therefore it is possible to get overflow interrupts for
* counters not currently "in use" and that condition must be checked
* in the overflow interrupt handler.
*
* So we use a hack, in that we program inactive counters with the
* "sw_count0" and "sw_count1" events. These count how many times
* the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
* unusual way to encode a NOP and therefore will not trigger in
* normal code.
*
* Starting with SPARC-T4 we have one control register per counter.
* And the counters are stored in individual registers. The registers
* for the counters are 64-bit but only a 32-bit counter is
* implemented. The event selections on SPARC-T4 lack any
* restrictions, therefore we can elide all of the complicated
* conflict resolution code we have for SPARC-T3 and earlier chips.
*/
#define MAX_HWEVENTS 4
#define MAX_PCRS 4
#define MAX_PERIOD ((1UL << 32) - 1)
#define PIC_UPPER_INDEX 0
#define PIC_LOWER_INDEX 1
#define PIC_NO_INDEX -1
struct cpu_hw_events {
/* Number of events currently scheduled onto this cpu.
* This tells how many entries in the arrays below
* are valid.
*/
int n_events;
/* Number of new events added since the last hw_perf_disable().
* This works because the perf event layer always adds new
* events inside of a perf_{disable,enable}() sequence.
*/
int n_added;
/* Array of events currently scheduled on this cpu. */
struct perf_event *event[MAX_HWEVENTS];
/* Array of encoded longs, specifying the %pcr register
* encoding and the mask of PIC counters this event can
* be scheduled on. See perf_event_encode() et al.
*/
unsigned long events[MAX_HWEVENTS];
/* The current counter index assigned to an event. When the
* event hasn't been programmed into the cpu yet, this will
* hold PIC_NO_INDEX. The event->hw.idx value tells us where
* we ought to schedule the event.
*/
int current_idx[MAX_HWEVENTS];
/* Software copy of %pcr register(s) on this cpu. */
u64 pcr[MAX_HWEVENTS];
/* Enable/disable state. */
int enabled;
unsigned int txn_flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
/* An event map describes the characteristics of a performance
* counter event. In particular it gives the encoding as well as
* a mask telling which counters the event can be measured on.
*
* The mask is unused on SPARC-T4 and later.
*/
struct perf_event_map {
u16 encoding;
u8 pic_mask;
#define PIC_NONE 0x00
#define PIC_UPPER 0x01
#define PIC_LOWER 0x02
};
/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}
static u8 perf_event_get_msk(unsigned long val)
{
return val & 0xff;
}
static u64 perf_event_get_enc(unsigned long val)
{
return val >> 16;
}
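/* Round-trip sketch (illustrative): perf_event_encode() packs the
 * 16-bit event encoding above an 8-bit PIC mask, so for
 * { 0x0009, PIC_LOWER } the encoded long is (0x0009 << 16) | 0x02,
 * and the two accessors recover 0x0009 and 0x02 respectively.
 */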
#define C(x) PERF_COUNT_HW_CACHE_##x
#define CACHE_OP_UNSUPPORTED 0xfffe
#define CACHE_OP_NONSENSE 0xffff
typedef struct perf_event_map cache_map_t
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
struct sparc_pmu {
const struct perf_event_map *(*event_map)(int);
const cache_map_t *cache_map;
int max_events;
u32 (*read_pmc)(int);
void (*write_pmc)(int, u64);
int upper_shift;
int lower_shift;
int event_mask;
int user_bit;
int priv_bit;
int hv_bit;
int irq_bit;
int upper_nop;
int lower_nop;
unsigned int flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME 0x00000001
#define SPARC_PMU_HAS_CONFLICTS 0x00000002
int max_hw_events;
int num_pcrs;
int num_pic_regs;
};
static u32 sparc_default_read_pmc(int idx)
{
u64 val;
val = pcr_ops->read_pic(0);
if (idx == PIC_UPPER_INDEX)
val >>= 32;
return val & 0xffffffff;
}
static void sparc_default_write_pmc(int idx, u64 val)
{
u64 shift, mask, pic;
shift = 0;
if (idx == PIC_UPPER_INDEX)
shift = 32;
mask = ((u64) 0xffffffff) << shift;
val <<= shift;
pic = pcr_ops->read_pic(0);
pic &= ~mask;
pic |= val;
pcr_ops->write_pic(0, pic);
}
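/* Worked example of the read-modify-write above (illustrative): on
 * pre-T4 chips both 32-bit counters share one 64-bit %pic, so writing
 * val == 0x1234 to PIC_UPPER_INDEX turns, say,
 *	pic == 0x00000000deadbeef
 * into
 *	pic == 0x00001234deadbeef
 * leaving the lower counter untouched.
 */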
static const struct perf_event_map ultra3_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
static const struct perf_event_map *ultra3_event_map(int event_id)
{
return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu ultra3_pmu = {
.event_map = ultra3_event_map,
.cache_map = &ultra3_cache_map,
.max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
.read_pmc = sparc_default_read_pmc,
.write_pmc = sparc_default_write_pmc,
.upper_shift = 11,
.lower_shift = 4,
.event_mask = 0x3f,
.user_bit = PCR_UTRACE,
.priv_bit = PCR_STRACE,
.upper_nop = 0x1c,
.lower_nop = 0x14,
.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
SPARC_PMU_HAS_CONFLICTS),
.max_hw_events = 2,
.num_pcrs = 1,
.num_pic_regs = 1,
};
/* Niagara1 is very limited. The upper PIC is hard-locked to count
 * only instructions, so it is free-running, which creates all kinds of
* problems. Some hardware designs make one wonder if the creator
* even looked at how this stuff gets used by software.
*/
static const struct perf_event_map niagara1_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};
static const struct perf_event_map *niagara1_event_map(int event_id)
{
return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu niagara1_pmu = {
.event_map = niagara1_event_map,
.cache_map = &niagara1_cache_map,
.max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
.read_pmc = sparc_default_read_pmc,
.write_pmc = sparc_default_write_pmc,
.upper_shift = 0,
.lower_shift = 4,
.event_mask = 0x7,
.user_bit = PCR_UTRACE,
.priv_bit = PCR_STRACE,
.upper_nop = 0x0,
.lower_nop = 0x0,
.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
SPARC_PMU_HAS_CONFLICTS),
.max_hw_events = 2,
.num_pcrs = 1,
.num_pic_regs = 1,
};
static const struct perf_event_map niagara2_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
static const struct perf_event_map *niagara2_event_map(int event_id)
{
return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu niagara2_pmu = {
.event_map = niagara2_event_map,
.cache_map = &niagara2_cache_map,
.max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
.read_pmc = sparc_default_read_pmc,
.write_pmc = sparc_default_write_pmc,
.upper_shift = 19,
.lower_shift = 6,
.event_mask = 0xfff,
.user_bit = PCR_UTRACE,
.priv_bit = PCR_STRACE,
.hv_bit = PCR_N2_HTRACE,
.irq_bit = 0x30,
.upper_nop = 0x220,
.lower_nop = 0x220,
.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
SPARC_PMU_HAS_CONFLICTS),
.max_hw_events = 2,
.num_pcrs = 1,
.num_pic_regs = 1,
};
static const struct perf_event_map niagara4_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) },
[PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f },
[PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 },
[PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 },
[PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f },
};
static const struct perf_event_map *niagara4_event_map(int event_id)
{
return &niagara4_perfmon_event_map[event_id];
}
static const cache_map_t niagara4_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { (3 << 6) | 0x3f },
[C(RESULT_MISS)] = { (11 << 6) | 0x03 },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { (17 << 6) | 0x3f },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { (6 << 6) | 0x3f },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static u32 sparc_vt_read_pmc(int idx)
{
u64 val = pcr_ops->read_pic(idx);
return val & 0xffffffff;
}
static void sparc_vt_write_pmc(int idx, u64 val)
{
u64 pcr;
pcr = pcr_ops->read_pcr(idx);
/* ensure ov and ntc are reset */
pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
pcr_ops->write_pic(idx, val & 0xffffffff);
pcr_ops->write_pcr(idx, pcr);
}
static const struct sparc_pmu niagara4_pmu = {
.event_map = niagara4_event_map,
.cache_map = &niagara4_cache_map,
.max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
.read_pmc = sparc_vt_read_pmc,
.write_pmc = sparc_vt_write_pmc,
.upper_shift = 5,
.lower_shift = 5,
.event_mask = 0x7ff,
.user_bit = PCR_N4_UTRACE,
.priv_bit = PCR_N4_STRACE,
/* We explicitly don't support hypervisor tracing. The T4
* generates the overflow event for precise events via a trap
* which will not be generated (ie. it's completely lost) if
* we happen to be in the hypervisor when the event triggers.
* Essentially, the overflow event reporting is completely
* unusable when you have hypervisor mode tracing enabled.
*/
.hv_bit = 0,
.irq_bit = PCR_N4_TOE,
.upper_nop = 0,
.lower_nop = 0,
.flags = 0,
.max_hw_events = 4,
.num_pcrs = 4,
.num_pic_regs = 4,
};
static const struct sparc_pmu sparc_m7_pmu = {
.event_map = niagara4_event_map,
.cache_map = &niagara4_cache_map,
.max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
.read_pmc = sparc_vt_read_pmc,
.write_pmc = sparc_vt_write_pmc,
.upper_shift = 5,
.lower_shift = 5,
.event_mask = 0x7ff,
.user_bit = PCR_N4_UTRACE,
.priv_bit = PCR_N4_STRACE,
/* We explicitly don't support hypervisor tracing. */
.hv_bit = 0,
.irq_bit = PCR_N4_TOE,
.upper_nop = 0,
.lower_nop = 0,
.flags = 0,
.max_hw_events = 4,
.num_pcrs = 4,
.num_pic_regs = 4,
};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
{
if (idx == PIC_UPPER_INDEX)
event_id <<= sparc_pmu->upper_shift;
else
event_id <<= sparc_pmu->lower_shift;
return event_id;
}
static u64 mask_for_index(int idx)
{
return event_encoding(sparc_pmu->event_mask, idx);
}
static u64 nop_for_index(int idx)
{
return event_encoding(idx == PIC_UPPER_INDEX ?
sparc_pmu->upper_nop :
sparc_pmu->lower_nop, idx);
}
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
u64 enc, val, mask = mask_for_index(idx);
int pcr_index = 0;
if (sparc_pmu->num_pcrs > 1)
pcr_index = idx;
enc = perf_event_get_enc(cpuc->events[idx]);
val = cpuc->pcr[pcr_index];
val &= ~mask;
val |= event_encoding(enc, idx);
cpuc->pcr[pcr_index] = val;
pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}
static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
u64 mask = mask_for_index(idx);
u64 nop = nop_for_index(idx);
int pcr_index = 0;
u64 val;
if (sparc_pmu->num_pcrs > 1)
pcr_index = idx;
val = cpuc->pcr[pcr_index];
val &= ~mask;
val |= nop;
cpuc->pcr[pcr_index] = val;
pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}
static u64 sparc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
int shift = 64 - 32;
u64 prev_raw_count, new_raw_count;
s64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = sparc_pmu->read_pmc(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
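/* Worked example of the shift trick above (illustrative): with 32-bit
 * counters shift == 32, so if prev_raw_count == 0xfffffff0 and the
 * counter wrapped to new_raw_count == 0x00000010, then
 *	delta = ((0x10 << 32) - (0xfffffff0 << 32)) >> 32 == 0x20
 * i.e. the shift pair sign-extends the 32-bit difference, and the
 * wraparound still yields the correct count of 32 events.
 */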
static int sparc_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
if (unlikely(period != hwc->last_period))
left = period - (hwc->last_period - left);
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > MAX_PERIOD)
left = MAX_PERIOD;
local64_set(&hwc->prev_count, (u64)-left);
sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);
perf_event_update_userpage(event);
return ret;
}
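/* Worked example (illustrative): for a sample_period of 1000 with
 * left == 1000, the PMC is programmed to (u64)-1000 & 0xffffffff ==
 * 0xfffffc18, so the 32-bit counter overflows, and raises the
 * overflow interrupt, after exactly 1000 increments.
 */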
static void read_in_all_counters(struct cpu_hw_events *cpuc)
{
int i;
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
if (cpuc->current_idx[i] != PIC_NO_INDEX &&
cpuc->current_idx[i] != cp->hw.idx) {
sparc_perf_event_update(cp, &cp->hw,
cpuc->current_idx[i]);
cpuc->current_idx[i] = PIC_NO_INDEX;
if (cp->hw.state & PERF_HES_STOPPED)
cp->hw.state |= PERF_HES_ARCH;
}
}
}
/* On this PMU all PICs are programmed using a single PCR. Calculate
* the combined control register value.
*
* For such chips we require that all of the events have the same
* configuration, so just fetch the settings from the first entry.
*/
static void calculate_single_pcr(struct cpu_hw_events *cpuc)
{
int i;
if (!cpuc->n_added)
goto out;
/* Assign to counters all unassigned events. */
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
enc = perf_event_get_enc(cpuc->events[i]);
cpuc->pcr[0] &= ~mask_for_index(idx);
if (hwc->state & PERF_HES_ARCH) {
cpuc->pcr[0] |= nop_for_index(idx);
} else {
cpuc->pcr[0] |= event_encoding(enc, idx);
hwc->state = 0;
}
}
out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}
static void sparc_pmu_start(struct perf_event *event, int flags);
/* On this PMU each PIC has its own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
int i;
if (!cpuc->n_added)
goto out;
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
cpuc->current_idx[i] = idx;
if (cp->hw.state & PERF_HES_ARCH)
continue;
sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
int idx = cp->hw.idx;
cpuc->pcr[idx] |= cp->hw.config_base;
}
}
/* If performance event entries have been added, move existing events
* around (if necessary) and then assign new entries to counters.
*/
static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
{
if (cpuc->n_added)
read_in_all_counters(cpuc);
if (sparc_pmu->num_pcrs == 1) {
calculate_single_pcr(cpuc);
} else {
calculate_multiple_pcrs(cpuc);
}
}
static void sparc_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
if (cpuc->n_events)
update_pcrs_for_enable(cpuc);
for (i = 0; i < sparc_pmu->num_pcrs; i++)
pcr_ops->write_pcr(i, cpuc->pcr[i]);
}
static void sparc_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
cpuc->n_added = 0;
for (i = 0; i < sparc_pmu->num_pcrs; i++) {
u64 val = cpuc->pcr[i];
val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
sparc_pmu->hv_bit | sparc_pmu->irq_bit);
cpuc->pcr[i] = val;
pcr_ops->write_pcr(i, cpuc->pcr[i]);
}
}
static int active_event_index(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
int i;
for (i = 0; i < cpuc->n_events; i++) {
if (cpuc->event[i] == event)
break;
}
BUG_ON(i == cpuc->n_events);
return cpuc->current_idx[i];
}
static void sparc_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
sparc_perf_event_set_period(event, &event->hw, idx);
}
event->hw.state = 0;
sparc_pmu_enable_event(cpuc, &event->hw, idx);
perf_event_update_userpage(event);
}
static void sparc_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (!(event->hw.state & PERF_HES_STOPPED)) {
sparc_pmu_disable_event(cpuc, &event->hw, idx);
event->hw.state |= PERF_HES_STOPPED;
}
if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
sparc_perf_event_update(event, &event->hw, idx);
event->hw.state |= PERF_HES_UPTODATE;
}
}
static void sparc_pmu_del(struct perf_event *event, int _flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long flags;
int i;
local_irq_save(flags);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
/* Absorb the final count and turn off the
* event.
*/
sparc_pmu_stop(event, PERF_EF_UPDATE);
/* Shift remaining entries down into
* the existing slot.
*/
while (++i < cpuc->n_events) {
cpuc->event[i - 1] = cpuc->event[i];
cpuc->events[i - 1] = cpuc->events[i];
cpuc->current_idx[i - 1] =
cpuc->current_idx[i];
}
perf_event_update_userpage(event);
cpuc->n_events--;
break;
}
}
local_irq_restore(flags);
}
static void sparc_pmu_read(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
struct hw_perf_event *hwc = &event->hw;
sparc_perf_event_update(event, hwc, idx);
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
stop_nmi_watchdog(NULL);
for (i = 0; i < sparc_pmu->num_pcrs; i++)
cpuc->pcr[i] = pcr_ops->read_pcr(i);
}
static void perf_event_grab_pmc(void)
{
if (atomic_inc_not_zero(&active_events))
return;
mutex_lock(&pmc_grab_mutex);
if (atomic_read(&active_events) == 0) {
if (atomic_read(&nmi_active) > 0) {
on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
BUG_ON(atomic_read(&nmi_active) != 0);
}
atomic_inc(&active_events);
}
mutex_unlock(&pmc_grab_mutex);
}
static void perf_event_release_pmc(void)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
if (atomic_read(&nmi_active) == 0)
on_each_cpu(start_nmi_watchdog, NULL, 1);
mutex_unlock(&pmc_grab_mutex);
}
}
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct perf_event_map *pmap;
if (!sparc_pmu->cache_map)
return ERR_PTR(-ENOENT);
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
if (pmap->encoding == CACHE_OP_UNSUPPORTED)
return ERR_PTR(-ENOENT);
if (pmap->encoding == CACHE_OP_NONSENSE)
return ERR_PTR(-EINVAL);
return pmap;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
 * the same time. This is simplified by the fact that we only
 * need to support at most two simultaneous HW events on the
 * chips that have counter scheduling conflicts at all.
*
* As a side effect, the evts[]->hw.idx values will be assigned
* on success. These are pending indexes. When the events are
* actually programmed into the chip, these values will propagate
* to the per-cpu cpuc->current_idx[] slots, see the code in
 * update_pcrs_for_enable() for details.
*/
static int sparc_check_constraints(struct perf_event **evts,
unsigned long *events, int n_ev)
{
u8 msk0 = 0, msk1 = 0;
int idx0 = 0;
/* This case is possible when we are invoked from the group
 * event scheduling transaction commit path before any events
 * have been collected.
 */
if (!n_ev)
return 0;
if (n_ev > sparc_pmu->max_hw_events)
return -1;
if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
int i;
for (i = 0; i < n_ev; i++)
evts[i]->hw.idx = i;
return 0;
}
msk0 = perf_event_get_msk(events[0]);
if (n_ev == 1) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
BUG_ON(n_ev != 2);
msk1 = perf_event_get_msk(events[1]);
/* If both events can go on any counter, OK. */
if (msk0 == (PIC_UPPER | PIC_LOWER) &&
msk1 == (PIC_UPPER | PIC_LOWER))
goto success;
/* If one event is limited to a specific counter,
* and the other can go on both, OK.
*/
if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
msk1 == (PIC_UPPER | PIC_LOWER)) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
msk0 == (PIC_UPPER | PIC_LOWER)) {
if (msk1 & PIC_UPPER)
idx0 = 1;
goto success;
}
/* If the events are fixed to different counters, OK. */
if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
(msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
/* Otherwise, there is a conflict. */
return -1;
success:
evts[0]->hw.idx = idx0;
if (n_ev == 2)
evts[1]->hw.idx = idx0 ^ 1;
return 0;
}
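/* Worked example (illustrative): scheduling a PIC_UPPER-only event
 * together with a PIC_UPPER|PIC_LOWER event gives msk0 == PIC_UPPER
 * and msk1 == (PIC_UPPER | PIC_LOWER), so idx0 stays 0: evts[0] is
 * pinned to the upper counter and evts[1] gets idx0 ^ 1 == 1, the
 * lower one.
 */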
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
struct perf_event *event;
int i, n, first;
if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
return 0;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; i++) {
event = evts[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
return 0;
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *evts[], unsigned long *events,
int *current_idx)
{
struct perf_event *event;
int n = 0;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
evts[n] = group;
events[n] = group->hw.event_base;
current_idx[n++] = PIC_NO_INDEX;
}
for_each_sibling_event(event, group) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
evts[n] = event;
events[n] = event->hw.event_base;
current_idx[n++] = PIC_NO_INDEX;
}
}
return n;
}
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int n0, ret = -EAGAIN;
unsigned long flags;
local_irq_save(flags);
n0 = cpuc->n_events;
if (n0 >= sparc_pmu->max_hw_events)
goto out;
cpuc->event[n0] = event;
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(ef_flags & PERF_EF_START))
event->hw.state |= PERF_HES_ARCH;
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
* at commit time(->commit_txn) as a whole
*/
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
goto nocheck;
if (check_excludes(cpuc->event, n0, 1))
goto out;
if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
goto out;
nocheck:
cpuc->n_events++;
cpuc->n_added++;
ret = 0;
out:
local_irq_restore(flags);
return ret;
}
static int sparc_pmu_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct perf_event *evts[MAX_HWEVENTS];
struct hw_perf_event *hwc = &event->hw;
unsigned long events[MAX_HWEVENTS];
int current_idx_dmy[MAX_HWEVENTS];
const struct perf_event_map *pmap;
int n;
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (attr->type) {
case PERF_TYPE_HARDWARE:
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
pmap = sparc_pmu->event_map(attr->config);
break;
case PERF_TYPE_HW_CACHE:
pmap = sparc_map_cache_event(attr->config);
if (IS_ERR(pmap))
return PTR_ERR(pmap);
break;
case PERF_TYPE_RAW:
pmap = NULL;
break;
default:
return -ENOENT;
}
if (pmap) {
hwc->event_base = perf_event_encode(pmap);
} else {
/*
* User gives us "(encoding << 16) | pic_mask" for
* PERF_TYPE_RAW events.
*/
hwc->event_base = attr->config;
}
/* We save the enable bits in the config_base. */
hwc->config_base = sparc_pmu->irq_bit;
if (!attr->exclude_user)
hwc->config_base |= sparc_pmu->user_bit;
if (!attr->exclude_kernel)
hwc->config_base |= sparc_pmu->priv_bit;
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
sparc_pmu->max_hw_events - 1,
evts, events, current_idx_dmy);
if (n < 0)
return -EINVAL;
}
events[n] = hwc->event_base;
evts[n] = event;
if (check_excludes(evts, n, 1))
return -EINVAL;
if (sparc_check_constraints(evts, events, n + 1))
return -EINVAL;
hwc->idx = PIC_NO_INDEX;
/* Try to do all error checking before this point, as unwinding
* state after grabbing the PMC is difficult.
*/
perf_event_grab_pmc();
event->destroy = hw_perf_event_destroy;
if (!hwc->sample_period) {
hwc->sample_period = MAX_PERIOD;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
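/* Usage sketch (illustrative): from userspace a PERF_TYPE_RAW config
 * is "(encoding << 16) | pic_mask", so counting a hypothetical
 * encoding of 0x0009 on either counter of a pre-T4 chip would be
 *
 *	perf stat -e r90003 ...
 *
 * since (0x0009 << 16) | PIC_UPPER | PIC_LOWER == 0x90003.
 */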
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
static void sparc_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
cpuhw->txn_flags = txn_flags;
if (txn_flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_disable(pmu);
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
unsigned int txn_flags;
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
txn_flags = cpuhw->txn_flags;
cpuhw->txn_flags = 0;
if (txn_flags & ~PERF_PMU_TXN_ADD)
return;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int n;
if (!sparc_pmu)
return -EINVAL;
WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
cpuc->txn_flags = 0;
return 0;
}
n = cpuc->n_events;
if (check_excludes(cpuc->event, 0, n))
return -EINVAL;
if (sparc_check_constraints(cpuc->event, cpuc->events, n))
return -EAGAIN;
cpuc->txn_flags = 0;
perf_pmu_enable(pmu);
return 0;
}
static struct pmu pmu = {
.pmu_enable = sparc_pmu_enable,
.pmu_disable = sparc_pmu_disable,
.event_init = sparc_pmu_event_init,
.add = sparc_pmu_add,
.del = sparc_pmu_del,
.start = sparc_pmu_start,
.stop = sparc_pmu_stop,
.read = sparc_pmu_read,
.start_txn = sparc_pmu_start_txn,
.cancel_txn = sparc_pmu_cancel_txn,
.commit_txn = sparc_pmu_commit_txn,
};
void perf_event_print_debug(void)
{
unsigned long flags;
int cpu, i;
if (!sparc_pmu)
return;
local_irq_save(flags);
cpu = smp_processor_id();
pr_info("\n");
for (i = 0; i < sparc_pmu->num_pcrs; i++)
pr_info("CPU#%d: PCR%d[%016llx]\n",
cpu, i, pcr_ops->read_pcr(i));
for (i = 0; i < sparc_pmu->num_pic_regs; i++)
pr_info("CPU#%d: PIC%d[%016llx]\n",
cpu, i, pcr_ops->read_pic(i));
local_irq_restore(flags);
}
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
u64 finish_clock;
u64 start_clock;
int i;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
default:
return NOTIFY_DONE;
}
start_clock = sched_clock();
regs = args->regs;
cpuc = this_cpu_ptr(&cpu_hw_events);
/* If the PMU has the TOE IRQ enable bits, we need to do a
* dummy write to the %pcr to clear the overflow bits and thus
* the interrupt.
*
* Do this before we peek at the counters to determine
* overflow so we don't lose any events.
*/
if (sparc_pmu->irq_bit &&
sparc_pmu->num_pcrs == 1)
pcr_ops->write_pcr(0, cpuc->pcr[0]);
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *event = cpuc->event[i];
int idx = cpuc->current_idx[i];
struct hw_perf_event *hwc;
u64 val;
if (sparc_pmu->irq_bit &&
sparc_pmu->num_pcrs > 1)
pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
hwc = &event->hw;
val = sparc_perf_event_update(event, hwc, idx);
if (val & (1ULL << 31))
continue;
perf_sample_data_init(&data, 0, hwc->last_period);
if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
sparc_pmu_stop(event, 0);
}
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
if (!strcmp(sparc_pmu_type, "ultra3") ||
!strcmp(sparc_pmu_type, "ultra3+") ||
!strcmp(sparc_pmu_type, "ultra3i") ||
!strcmp(sparc_pmu_type, "ultra4+")) {
sparc_pmu = &ultra3_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara")) {
sparc_pmu = &niagara1_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara2") ||
!strcmp(sparc_pmu_type, "niagara3")) {
sparc_pmu = &niagara2_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara4") ||
!strcmp(sparc_pmu_type, "niagara5")) {
sparc_pmu = &niagara4_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "sparc-m7")) {
sparc_pmu = &sparc_m7_pmu;
return true;
}
return false;
}
static int __init init_hw_perf_events(void)
{
int err;
pr_info("Performance events: ");
err = pcr_arch_init();
if (err || !supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
return 0;
}
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
register_die_notifier(&perf_event_nmi_notifier);
return 0;
}
pure_initcall(init_hw_perf_events);
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph = 0;
#endif
stack_trace_flush();
perf_callchain_store(entry, regs->tpc);
ksp = regs->u_regs[UREG_I6];
fp = ksp + STACK_BIAS;
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
unsigned long pc;
if (!kstack_valid(current_thread_info(), fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
if (kstack_is_trap_frame(current_thread_info(), regs)) {
if (user_mode(regs))
break;
pc = regs->tpc;
fp = regs->u_regs[UREG_I6] + STACK_BIAS;
} else {
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
struct ftrace_ret_stack *ret_stack;
ret_stack = ftrace_graph_get_ret_stack(current,
graph);
if (ret_stack) {
pc = ret_stack->ret;
perf_callchain_store(entry, pc);
graph++;
}
}
#endif
} while (entry->nr < entry->max_stack);
}
static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{
/* addresses should be at least 4-byte aligned */
if (((unsigned long) fp) & 3)
return 0;
return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}
static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ufp;
ufp = regs->u_regs[UREG_FP] + STACK_BIAS;
do {
struct sparc_stackf __user *usf;
struct sparc_stackf sf;
unsigned long pc;
usf = (struct sparc_stackf __user *)ufp;
if (!valid_user_frame(usf, sizeof(sf)))
break;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS;
perf_callchain_store(entry, pc);
} while (entry->nr < entry->max_stack);
}
static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ufp;
ufp = regs->u_regs[UREG_FP] & 0xffffffffUL;
do {
unsigned long pc;
if (thread32_stack_is_64bit(ufp)) {
struct sparc_stackf __user *usf;
struct sparc_stackf sf;
ufp += STACK_BIAS;
usf = (struct sparc_stackf __user *)ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc & 0xffffffff;
ufp = ((unsigned long) sf.fp) & 0xffffffff;
} else {
struct sparc_stackf32 __user *usf;
struct sparc_stackf32 sf;
usf = (struct sparc_stackf32 __user *)ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
}
perf_callchain_store(entry, pc);
} while (entry->nr < entry->max_stack);
}
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
u64 saved_fault_address = current_thread_info()->fault_address;
u8 saved_fault_code = get_thread_fault_code();
perf_callchain_store(entry, regs->tpc);
if (!current->mm)
return;
flushw_user();
pagefault_disable();
if (test_thread_flag(TIF_32BIT))
perf_callchain_user_32(entry, regs);
else
perf_callchain_user_64(entry, regs);
pagefault_enable();
set_thread_fault_code(saved_fault_code);
current_thread_info()->fault_address = saved_fault_address;
}
| linux-master | arch/sparc/kernel/perf_event.c |
// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc/kernel/time.c
*
* Copyright (C) 1995 David S. Miller ([email protected])
* Copyright (C) 1996 Thomas K. Dyas ([email protected])
*
* Chris Davis ([email protected]) 03/27/1998
* Added support for the intersil on the sun4/4200
*
* Gleb Raiko ([email protected]) 08/18/1998
* Support for MicroSPARC-IIep, PCI CPU.
*
* This file handles the Sparc specific time handling details.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/rtc/m48t59.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/profile.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/mc146818rtc.h>
#include <asm/oplib.h>
#include <asm/timex.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/idprom.h>
#include <asm/page.h>
#include <asm/pcic.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include "kernel.h"
#include "irq.h"
static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
static __volatile__ u64 timer_cs_internal_counter = 0;
static char timer_cs_enabled = 0;
static struct clock_event_device timer_ce;
static char timer_ce_enabled = 0;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent);
#endif
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
unsigned long profile_pc(struct pt_regs *regs)
{
extern char __copy_user_begin[], __copy_user_end[];
extern char __bzero_begin[], __bzero_end[];
unsigned long pc = regs->pc;
if (in_lock_functions(pc) ||
(pc >= (unsigned long) __copy_user_begin &&
pc < (unsigned long) __copy_user_end) ||
(pc >= (unsigned long) __bzero_begin &&
pc < (unsigned long) __bzero_end))
pc = regs->u_regs[UREG_RETPC];
return pc;
}
EXPORT_SYMBOL(profile_pc);
volatile u32 __iomem *master_l10_counter;
irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
{
if (timer_cs_enabled) {
write_seqlock(&timer_cs_lock);
timer_cs_internal_counter++;
sparc_config.clear_clock_irq();
write_sequnlock(&timer_cs_lock);
} else {
sparc_config.clear_clock_irq();
}
if (timer_ce_enabled)
timer_ce.event_handler(&timer_ce);
return IRQ_HANDLED;
}
static int timer_ce_shutdown(struct clock_event_device *evt)
{
timer_ce_enabled = 0;
smp_mb();
return 0;
}
static int timer_ce_set_periodic(struct clock_event_device *evt)
{
timer_ce_enabled = 1;
smp_mb();
return 0;
}
static __init void setup_timer_ce(void)
{
struct clock_event_device *ce = &timer_ce;
BUG_ON(smp_processor_id() != boot_cpu_id);
ce->name = "timer_ce";
ce->rating = 100;
ce->features = CLOCK_EVT_FEAT_PERIODIC;
ce->set_state_shutdown = timer_ce_shutdown;
ce->set_state_periodic = timer_ce_set_periodic;
ce->tick_resume = timer_ce_set_periodic;
ce->cpumask = cpu_possible_mask;
ce->shift = 32;
ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
ce->shift);
clockevents_register_device(ce);
}
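/* Illustrative arithmetic for the mult/shift pair above: clockevents
 * convert nanoseconds to ticks as (ns * mult) >> shift, and
 * div_sc(rate, NSEC_PER_SEC, shift) == (rate << shift) / NSEC_PER_SEC.
 * Assuming a hypothetical 100 Hz clock_rate and shift == 32, mult
 * comes out to about 429, so a 10 ms delta converts to
 * (10000000 * 429) >> 32, i.e. roughly one tick.
 */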
static unsigned int sbus_cycles_offset(void)
{
u32 val, offset;
val = sbus_readl(master_l10_counter);
offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
/* Limit hit? */
if (val & TIMER_LIMIT_BIT)
offset += sparc_config.cs_period;
return offset;
}
static u64 timer_cs_read(struct clocksource *cs)
{
unsigned int seq, offset;
u64 cycles;
do {
seq = read_seqbegin(&timer_cs_lock);
cycles = timer_cs_internal_counter;
offset = sparc_config.get_cycles_offset();
} while (read_seqretry(&timer_cs_lock, seq));
/* Count absolute cycles */
cycles *= sparc_config.cs_period;
cycles += offset;
return cycles;
}
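/* Worked example (illustrative): with a hypothetical cs_period of
 * 10000 cycles per timer tick, an internal counter of 5 and an
 * in-progress hardware offset of 1234 read back as
 * 5 * 10000 + 1234 == 51234 absolute cycles; the seqlock retry
 * ensures counter and offset are sampled from the same tick.
 */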
static struct clocksource timer_cs = {
.name = "timer_cs",
.rating = 100,
.read = timer_cs_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static __init int setup_timer_cs(void)
{
timer_cs_enabled = 1;
return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
}
#ifdef CONFIG_SMP
static int percpu_ce_shutdown(struct clock_event_device *evt)
{
int cpu = cpumask_first(evt->cpumask);
sparc_config.load_profile_irq(cpu, 0);
return 0;
}
static int percpu_ce_set_periodic(struct clock_event_device *evt)
{
int cpu = cpumask_first(evt->cpumask);
sparc_config.load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
return 0;
}
static int percpu_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
int cpu = cpumask_first(evt->cpumask);
unsigned int next = (unsigned int)delta;
sparc_config.load_profile_irq(cpu, next);
return 0;
}
void register_percpu_ce(int cpu)
{
struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
unsigned int features = CLOCK_EVT_FEAT_PERIODIC;
if (sparc_config.features & FEAT_L14_ONESHOT)
features |= CLOCK_EVT_FEAT_ONESHOT;
ce->name = "percpu_ce";
ce->rating = 200;
ce->features = features;
ce->set_state_shutdown = percpu_ce_shutdown;
ce->set_state_periodic = percpu_ce_set_periodic;
ce->set_state_oneshot = percpu_ce_shutdown;
ce->set_next_event = percpu_ce_set_next_event;
ce->cpumask = cpumask_of(cpu);
ce->shift = 32;
ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
ce->shift);
ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce);
ce->max_delta_ticks = (unsigned long)sparc_config.clock_rate;
ce->min_delta_ns = clockevent_delta2ns(100, ce);
ce->min_delta_ticks = 100;
clockevents_register_device(ce);
}
#endif
static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
{
struct platform_device *pdev = to_platform_device(dev);
struct m48t59_plat_data *pdata = pdev->dev.platform_data;
return readb(pdata->ioaddr + ofs);
}
static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
{
struct platform_device *pdev = to_platform_device(dev);
struct m48t59_plat_data *pdata = pdev->dev.platform_data;
writeb(val, pdata->ioaddr + ofs);
}
static struct m48t59_plat_data m48t59_data = {
.read_byte = mostek_read_byte,
.write_byte = mostek_write_byte,
};
/* resource is set at runtime */
static struct platform_device m48t59_rtc = {
.name = "rtc-m48t59",
.id = 0,
.num_resources = 1,
.dev = {
.platform_data = &m48t59_data,
},
};
static int clock_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
if (!model)
return -ENODEV;
/* Only the primary RTC has an address property */
if (!of_property_present(dp, "address"))
return -ENODEV;
m48t59_rtc.resource = &op->resource[0];
if (!strcmp(model, "mk48t02")) {
/* Map the clock register io area read-only */
m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0,
2048, "rtc-m48t59");
m48t59_data.type = M48T59RTC_TYPE_M48T02;
} else if (!strcmp(model, "mk48t08")) {
m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0,
8192, "rtc-m48t59");
m48t59_data.type = M48T59RTC_TYPE_M48T08;
} else
return -ENODEV;
if (platform_device_register(&m48t59_rtc) < 0)
printk(KERN_ERR "Registering RTC device failed\n");
return 0;
}
static const struct of_device_id clock_match[] = {
{
.name = "eeprom",
},
{},
};
static struct platform_driver clock_driver = {
.probe = clock_probe,
.driver = {
.name = "rtc",
.of_match_table = clock_match,
},
};
/* Probe for the mostek real time clock chip. */
static int __init clock_init(void)
{
return platform_driver_register(&clock_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the RTC driver
* need to see the clock registers.
*/
fs_initcall(clock_init);
static void __init sparc32_late_time_init(void)
{
if (sparc_config.features & FEAT_L10_CLOCKEVENT)
setup_timer_ce();
if (sparc_config.features & FEAT_L10_CLOCKSOURCE)
setup_timer_cs();
#ifdef CONFIG_SMP
register_percpu_ce(smp_processor_id());
#endif
}
static void __init sbus_time_init(void)
{
sparc_config.get_cycles_offset = sbus_cycles_offset;
sparc_config.init_timers();
}
void __init time_init(void)
{
sparc_config.features = 0;
late_time_init = sparc32_late_time_init;
if (pcic_present())
pci_time_init();
else
sbus_time_init();
}
| linux-master | arch/sparc/kernel/time_32.c |
// SPDX-License-Identifier: GPL-2.0
/* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller ([email protected])
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/export.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/auxio.h>
#include <asm/string.h> /* memset(), Linux has no bzero() */
#include <asm/cpu_type.h>
#include "kernel.h"
/* Probe and map in the Auxiliary I/O register */
/* auxio_register is not static because it is referenced
* in entry.S::floppy_tdone
*/
void __iomem *auxio_register = NULL;
static DEFINE_SPINLOCK(auxio_lock);
void __init auxio_probe(void)
{
phandle node, auxio_nd;
struct linux_prom_registers auxregs[1];
struct resource r;
switch (sparc_cpu_model) {
case sparc_leon:
case sun4d:
return;
default:
break;
}
node = prom_getchild(prom_root_node);
auxio_nd = prom_searchsiblings(node, "auxiliary-io");
if (!auxio_nd) {
node = prom_searchsiblings(node, "obio");
node = prom_getchild(node);
auxio_nd = prom_searchsiblings(node, "auxio");
if (!auxio_nd) {
#ifdef CONFIG_PCI
/* There may be auxio on Ebus */
return;
#else
if (prom_searchsiblings(node, "leds")) {
/* VME chassis sun4m machine, no auxio exists. */
return;
}
prom_printf("Cannot find auxio node, cannot continue...\n");
prom_halt();
#endif
}
}
if (prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs)) <= 0)
return;
prom_apply_obio_ranges(auxregs, 0x1);
/* Map the register both read and write */
r.flags = auxregs[0].which_io & 0xF;
r.start = auxregs[0].phys_addr;
r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1;
auxio_register = of_ioremap(&r, 0, auxregs[0].reg_size, "auxio");
/* Fix the address on sun4m. */
if ((((unsigned long) auxregs[0].phys_addr) & 3) == 3)
auxio_register += (3 - ((unsigned long)auxio_register & 3));
set_auxio(AUXIO_LED, 0);
}
unsigned char get_auxio(void)
{
if (auxio_register)
return sbus_readb(auxio_register);
return 0;
}
EXPORT_SYMBOL(get_auxio);
void set_auxio(unsigned char bits_on, unsigned char bits_off)
{
unsigned char regval;
unsigned long flags;
spin_lock_irqsave(&auxio_lock, flags);
switch (sparc_cpu_model) {
case sun4m:
if (!auxio_register)
break; /* VME chassis sun4m, no auxio. */
regval = sbus_readb(auxio_register);
sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN4M,
auxio_register);
break;
case sun4d:
break;
default:
panic("Can't set AUXIO register on this machine.");
}
spin_unlock_irqrestore(&auxio_lock, flags);
}
EXPORT_SYMBOL(set_auxio);
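#if 0	/* Illustrative usage sketch only, not built: how a driver
	 * might pulse the front LED with the helpers above.
	 */
static void example_led_pulse(void)
{
	set_auxio(AUXIO_LED, 0);	/* LED on */
	set_auxio(0, AUXIO_LED);	/* LED off */
}
#endif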
/* sun4m power control register (AUXIO2) */
volatile u8 __iomem *auxio_power_register = NULL;
void __init auxio_power_probe(void)
{
struct linux_prom_registers regs;
phandle node;
struct resource r;
/* Attempt to find the sun4m power control node. */
node = prom_getchild(prom_root_node);
node = prom_searchsiblings(node, "obio");
node = prom_getchild(node);
node = prom_searchsiblings(node, "power");
if (node == 0 || (s32)node == -1)
return;
/* Map the power control register. */
if (prom_getproperty(node, "reg", (char *)&regs, sizeof(regs)) <= 0)
return;
prom_apply_obio_ranges(&regs, 1);
memset(&r, 0, sizeof(r));
r.flags = regs.which_io & 0xF;
r.start = regs.phys_addr;
r.end = regs.phys_addr + regs.reg_size - 1;
auxio_power_register =
(u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower");
/* Display a quick message on the console. */
if (auxio_power_register)
printk(KERN_INFO "Power off control detected.\n");
}
| linux-master | arch/sparc/kernel/auxio_32.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
u32 *insn = (u32 *) (unsigned long) entry->code;
u32 val;
if (type == JUMP_LABEL_JMP) {
s32 off = (s32)entry->target - (s32)entry->code;
bool use_v9_branch = false;
BUG_ON(off & 3);
#ifdef CONFIG_SPARC64
if (off <= 0xfffff && off >= -0x100000)
use_v9_branch = true;
#endif
if (use_v9_branch) {
/* WDISP19 - target is . + immed << 2 */
/* ba,pt %xcc, . + off */
val = 0x10680000 | (((u32) off >> 2) & 0x7ffff);
} else {
/* WDISP22 - target is . + immed << 2 */
BUG_ON(off > 0x7fffff);
BUG_ON(off < -0x800000);
/* ba . + off */
val = 0x10800000 | (((u32) off >> 2) & 0x3fffff);
}
} else {
val = 0x01000000;
}
mutex_lock(&text_mutex);
*insn = val;
flushi(insn);
mutex_unlock(&text_mutex);
}
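/* Worked example of the WDISP19 case above (illustrative): patching a
 * jump whose target is 0x100 bytes ahead gives off == 0x100, so
 *	val = 0x10680000 | ((0x100 >> 2) & 0x7ffff) == 0x10680040
 * which is "ba,pt %xcc, . + 0x100"; the non-jump case instead writes
 * 0x01000000, the canonical sethi-based nop.
 */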
| linux-master | arch/sparc/kernel/jump_label.c |
// SPDX-License-Identifier: GPL-2.0
/* kgdb.c: KGDB support for 64-bit sparc.
*
* Copyright (C) 2008 David S. Miller <[email protected]>
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include "kernel.h"
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window *win;
int i;
gdb_regs[GDB_G0] = 0;
for (i = 0; i < 15; i++)
gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i];
win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS);
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F62; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_PC] = regs->tpc;
gdb_regs[GDB_NPC] = regs->tnpc;
gdb_regs[GDB_STATE] = regs->tstate;
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_FPRS] = 0;
gdb_regs[GDB_Y] = regs->y;
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct thread_info *t = task_thread_info(p);
extern unsigned int switch_to_pc;
extern unsigned int ret_from_fork;
struct reg_window *win;
unsigned long pc, cwp;
int i;
for (i = GDB_G0; i < GDB_G6; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_G6] = (unsigned long) t;
gdb_regs[GDB_G7] = (unsigned long) p;
for (i = GDB_O0; i < GDB_SP; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_SP] = t->ksp;
gdb_regs[GDB_O7] = 0;
win = (struct reg_window *) (t->ksp + STACK_BIAS);
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F62; i++)
gdb_regs[i] = 0;
if (t->new_child)
pc = (unsigned long) &ret_from_fork;
else
pc = (unsigned long) &switch_to_pc;
gdb_regs[GDB_PC] = pc;
gdb_regs[GDB_NPC] = pc + 4;
cwp = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP];
gdb_regs[GDB_STATE] = (TSTATE_PRIV | TSTATE_IE | cwp);
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_FPRS] = 0;
gdb_regs[GDB_Y] = 0;
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window *win;
int i;
for (i = 0; i < 15; i++)
regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i];
/* If the TSTATE register is changing, we have to preserve
* the CWP field, otherwise window save/restore explodes.
*/
if (regs->tstate != gdb_regs[GDB_STATE]) {
unsigned long cwp = regs->tstate & TSTATE_CWP;
regs->tstate = (gdb_regs[GDB_STATE] & ~TSTATE_CWP) | cwp;
}
regs->tpc = gdb_regs[GDB_PC];
regs->tnpc = gdb_regs[GDB_NPC];
regs->y = gdb_regs[GDB_Y];
win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS);
for (i = 0; i < 8; i++)
win->locals[i] = gdb_regs[GDB_L0 + i];
for (i = 0; i < 8; i++)
win->ins[i] = gdb_regs[GDB_I0 + i];
}
#ifdef CONFIG_SMP
void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
{
unsigned long flags;
__asm__ __volatile__("rdpr %%pstate, %0\n\t"
"wrpr %0, %1, %%pstate"
: "=r" (flags)
: "i" (PSTATE_IE));
flushw_all();
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(raw_smp_processor_id(), regs);
__asm__ __volatile__("wrpr %0, 0, %%pstate"
: : "r" (flags));
}
#endif
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *linux_regs)
{
unsigned long addr;
char *ptr;
switch (remcomInBuffer[0]) {
case 'c':
/* try to read optional parameter, pc unchanged if no parameter */
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr)) {
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
fallthrough;
case 'D':
case 'k':
if (linux_regs->tpc == (unsigned long) arch_kgdb_breakpoint) {
linux_regs->tpc = linux_regs->tnpc;
linux_regs->tnpc += 4;
}
return 0;
}
return -1;
}
asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
unsigned long flags;
if (user_mode(regs)) {
bad_trap(regs, trap_level);
goto out;
}
flushw_all();
local_irq_save(flags);
kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
local_irq_restore(flags);
out:
exception_exit(prev_state);
}
int kgdb_arch_init(void)
{
return 0;
}
void kgdb_arch_exit(void)
{
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->tpc = ip;
regs->tnpc = regs->tpc + 4;
}
const struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
};
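/* For reference (illustrative): those four bytes are the big-endian
 * encoding of 0x91d02072, i.e. "ta 0x72" -- a trap-always into
 * software trap 0x72, which the trap tables hand to kgdb_trap()
 * above.
 */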
| linux-master | arch/sparc/kernel/kgdb_64.c |
// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller ([email protected])
* Copyright (C) 1999, 2000 Jakub Jelinek ([email protected])
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#include <asm/iommu.h>
#include "iommu_common.h"
#include "kernel.h"
#define STC_CTXMATCH_ADDR(STC, CTX) \
((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
#define iommu_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define iommu_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
unsigned long tag;
int entry;
tag = iommu->iommu_tags;
for (entry = 0; entry < 16; entry++) {
iommu_write(tag, 0);
tag += 8;
}
/* Ensure completion of previous PIO writes. */
(void) iommu_read(iommu->write_complete_reg);
}
}
#define IOPTE_CONSISTENT(CTX) \
(IOPTE_VALID | IOPTE_CACHE | \
(((CTX) << 47) & IOPTE_CONTEXT))
#define IOPTE_STREAMING(CTX) \
(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
/* Existing mappings are never marked invalid, instead they
* are pointed to a dummy page.
*/
#define IOPTE_IS_DUMMY(iommu, iopte) \
((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
unsigned long val = iopte_val(*iopte);
val &= ~IOPTE_PAGE;
val |= iommu->dummy_page_pa;
iopte_val(*iopte) = val;
}
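/* Editor's note: iopte_make_dummy() rewrites only the page-frame field of
 * the IOPTE, so a "freed" entry keeps IOPTE_VALID set but points at the
 * zeroed dummy page.  Stray DMA through a stale entry therefore lands in
 * harmless memory instead of faulting the IOMMU, which is what the comment
 * above IOPTE_IS_DUMMY() alludes to.
 */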
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node)
{
unsigned long i, order, sz, num_tsb_entries;
struct page *page;
num_tsb_entries = tsbsize / sizeof(iopte_t);
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
if (!iommu->tbl.map)
return -ENOMEM;
iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
(tlb_type != hypervisor ? iommu_flushall : NULL),
false, 1, false);
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
*/
page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
goto out_free_map;
}
iommu->dummy_page = (unsigned long) page_address(page);
memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
/* Now allocate and setup the IOMMU page table itself. */
order = get_order(tsbsize);
page = alloc_pages_node(numa_node, GFP_KERNEL, order);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
goto out_free_dummy_page;
}
iommu->page_table = (iopte_t *)page_address(page);
for (i = 0; i < num_tsb_entries; i++)
iopte_make_dummy(iommu, &iommu->page_table[i]);
return 0;
out_free_dummy_page:
free_page(iommu->dummy_page);
iommu->dummy_page = 0UL;
out_free_map:
kfree(iommu->tbl.map);
iommu->tbl.map = NULL;
return -ENOMEM;
}
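/* Editor's note: a worked sizing example for iommu_table_init().  With a
 * 128KB TSB and 8-byte IOPTEs, num_tsb_entries = 131072 / 8 = 16384, so the
 * free-area bitmap needs 16384 / 8 = 2048 bytes; the "(sz + 7) & ~7" step
 * merely rounds that up to the next 8-byte multiple.
 */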
static inline iopte_t *alloc_npages(struct device *dev,
struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
}
static int iommu_alloc_ctx(struct iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
if (unlikely(n == IOMMU_NUM_CTXS)) {
n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
if (unlikely(n == lowest)) {
printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
n = 0;
}
}
if (n)
__set_bit(n, iommu->ctx_bitmap);
return n;
}
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
if (likely(ctx)) {
__clear_bit(ctx, iommu->ctx_bitmap);
if (ctx < iommu->ctx_lowest_free)
iommu->ctx_lowest_free = ctx;
}
}
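/* Editor's note: the context allocator above is a simple rotating bitmap.
 * ctx_lowest_free caches the lowest possibly-free slot, so the common case
 * is a single find_next_zero_bit() from there; on wrap-around the search
 * restarts at bit 1 because context 0 is reserved to mean "no context"
 * (iommu_free_ctx() ignores ctx 0 for the same reason).
 */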
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs)
{
unsigned long order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
iopte_t *iopte;
void *ret;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (order >= 10)
return NULL;
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
return NULL;
first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
*dma_addrp = (iommu->tbl.table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
first_page = __pa(first_page);
while (npages--) {
iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
IOPTE_WRITE |
(first_page & IOPTE_PAGE));
iopte++;
first_page += IO_PAGE_SIZE;
}
return ret;
}
static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma,
unsigned long attrs)
{
struct iommu *iommu;
unsigned long order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = alloc_npages(dev, iommu, npages);
spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(!base))
goto bad;
bus_addr = (iommu->tbl.table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
iopte_protection = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
iopte_val(*base) = iopte_protection | base_paddr;
return ret;
bad:
iommu_free_ctx(iommu, ctx);
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
return DMA_MAPPING_ERROR;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
u32 vaddr, unsigned long ctx, unsigned long npages,
enum dma_data_direction direction)
{
int limit;
if (strbuf->strbuf_ctxflush &&
iommu->iommu_ctxflush) {
unsigned long matchreg, flushreg;
u64 val;
flushreg = strbuf->strbuf_ctxflush;
matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
iommu_write(flushreg, ctx);
val = iommu_read(matchreg);
val &= 0xffff;
if (!val)
goto do_flush_sync;
while (val) {
if (val & 0x1)
iommu_write(flushreg, ctx);
val >>= 1;
}
val = iommu_read(matchreg);
if (unlikely(val)) {
printk(KERN_WARNING "strbuf_flush: ctx flush "
"timeout matchreg[%llx] ctx[%lx]\n",
val, ctx);
goto do_page_flush;
}
} else {
unsigned long i;
do_page_flush:
for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
iommu_write(strbuf->strbuf_pflush, vaddr);
}
do_flush_sync:
/* If the device could not have possibly put dirty data into
* the streaming cache, no flush-flag synchronization needs
* to be performed.
*/
if (direction == DMA_TO_DEVICE)
return;
STC_FLUSHFLAG_INIT(strbuf);
iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
(void) iommu_read(iommu->write_complete_reg);
limit = 100000;
while (!STC_FLUSHFLAG_SET(strbuf)) {
limit--;
if (!limit)
break;
udelay(1);
rmb();
}
if (!limit)
printk(KERN_WARNING "strbuf_flush: flushflag timeout "
"vaddr[%08x] ctx[%lx] npages[%ld]\n",
vaddr, ctx, npages);
}
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, ctx, i;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
/* Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
strbuf_flush(strbuf, iommu, bus_addr, ctx,
npages, direction);
/* Step 2: Clear out TSB entries. */
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
unsigned long seg_boundary_size;
int outcount, incount, i;
struct strbuf *strbuf;
struct iommu *iommu;
unsigned long base_shift;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (nelems == 0 || !iommu)
return -EINVAL;
spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
if (strbuf->strbuf_enabled)
prot = IOPTE_STREAMING(ctx);
else
prot = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
prot |= IOPTE_WRITE;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
outs->dma_length = 0;
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
slen = s->length;
/* Sanity check */
if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
dma_addr = iommu->tbl.table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
iopte_val(*base) = prot | paddr;
base++;
paddr += IO_PAGE_SIZE;
}
/* If we are in an open segment, try merging */
if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size) ||
(is_span_boundary(out_entry, base_shift,
seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
}
}
if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
spin_unlock_irqrestore(&iommu->lock, flags);
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_length = 0;
}
return outcount;
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages, entry, j;
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
entry = (vaddr - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
IOMMU_ERROR_CODE);
s->dma_length = 0;
}
if (s == outs)
break;
}
spin_unlock_irqrestore(&iommu->lock, flags);
return -EINVAL;
}
/* If contexts are being used, they are the same in all of the mappings
* we make for a particular SG.
*/
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
unsigned long ctx = 0;
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
struct iommu_map_table *tbl = &iommu->tbl;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
return ctx;
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
unsigned long flags, ctx;
struct scatterlist *sg;
struct strbuf *strbuf;
struct iommu *iommu;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
ctx = fetch_sg_ctx(iommu, sglist);
spin_lock_irqsave(&iommu->lock, flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages, entry;
iopte_t *base;
int i;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
entry = ((dma_handle - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
dma_handle &= IO_PAGE_MASK;
if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
strbuf_flush(strbuf, iommu, dma_handle, ctx,
npages, direction);
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_single_for_cpu(struct device *dev,
dma_addr_t bus_addr, size_t sz,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
struct iommu_map_table *tbl = &iommu->tbl;
iopte = iommu->page_table +
((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
struct scatterlist *sg, *sgprv;
u32 bus_addr;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
struct iommu_map_table *tbl = &iommu->tbl;
iopte = iommu->page_table + ((sglist[0].dma_address -
tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
sgprv = NULL;
for_each_sg(sglist, sg, nelems, i) {
if (sg->dma_length == 0)
break;
sgprv = sg;
}
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
- bus_addr) >> IO_PAGE_SHIFT;
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static int dma_4u_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
if (ali_sound_dma_hack(dev, device_mask))
return 1;
if (device_mask < iommu->dma_addr_mask)
return 0;
return 1;
}
static const struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
.map_page = dma_4u_map_page,
.unmap_page = dma_4u_unmap_page,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
.dma_supported = dma_4u_supported,
};
const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
| linux-master | arch/sparc/kernel/iommu.c |
// SPDX-License-Identifier: GPL-2.0
/* leon_smp.c: Sparc-Leon SMP support.
*
* based on sun4m_smp.c
* Copyright (C) 1996 David S. Miller ([email protected])
* Copyright (C) 2009 Daniel Hellstrom ([email protected]) Aeroflex Gaisler AB
* Copyright (C) 2009 Konrad Eisele ([email protected]) Aeroflex Gaisler AB
*/
#include <asm/head.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq_regs.h>
#include <asm/traps.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>
#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include <asm/timer.h>
#include "kernel.h"
#include "irq.h"
extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
void leon_configure_cache_smp(void);
static void leon_ipi_init(void);
/* IRQ number of LEON IPIs */
int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT;
static inline unsigned long do_swap(volatile unsigned long *ptr,
unsigned long val)
{
__asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val)
: "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
: "memory");
return val;
}
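/* Editor's note: an illustrative, hypothetical sketch (kept under #if 0,
 * never compiled) of the classic test-and-set idiom the atomic exchange
 * above enables; in this file do_swap() is only used further down to
 * publish cpu_callin_map entries.
 */
#if 0
static void example_acquire_flag(volatile unsigned long *flag)
{
	/* Atomically exchange in 1; reading back 0 means we got it first. */
	while (do_swap(flag, 1) != 0)
		; /* spin until the current owner stores 0 again */
}
#endif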
void leon_cpu_pre_starting(void *arg)
{
leon_configure_cache_smp();
}
void leon_cpu_pre_online(void *arg)
{
int cpuid = hard_smp_processor_id();
/* Allow master to continue. The master will then give us the
* go-ahead by setting the smp_commenced_mask and will wait without
* timeouts until our setup is completed fully (signified by
* our bit being set in the cpu_online_mask).
*/
do_swap(&cpu_callin_map[cpuid], 1);
local_ops->cache_all();
local_ops->tlb_all();
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(¤t_set[cpuid])
: "memory" /* paranoid */);
/* Attach to the address space of init_task. */
mmgrab(&init_mm);
current->active_mm = &init_mm;
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
mb();
}
/*
* Cycle through the processors asking the PROM to start each one.
*/
extern struct linux_prom_registers smp_penguin_ctable;
void leon_configure_cache_smp(void)
{
unsigned long cfg = sparc_leon3_get_dcachecfg();
int me = smp_processor_id();
if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) {
printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n",
(unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg),
(unsigned int)cfg, (unsigned int)me);
sparc_leon3_disable_cache();
} else {
if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) {
sparc_leon3_enable_snooping();
} else {
printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n",
me);
sparc_leon3_disable_cache();
}
}
local_ops->cache_all();
local_ops->tlb_all();
}
static void leon_smp_setbroadcast(unsigned int mask)
{
int broadcast =
((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
LEON3_IRQMPSTATUS_BROADCAST) & 1);
if (!broadcast) {
prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp wont work !!!!! ####### nr cpus: %d\n",
leon_smp_nrcpus());
if (leon_smp_nrcpus() > 1) {
BUG();
} else {
prom_printf("continue anyway\n");
return;
}
}
LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
}
int leon_smp_nrcpus(void)
{
int nrcpu =
((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1;
return nrcpu;
}
void __init leon_boot_cpus(void)
{
int nrcpu = leon_smp_nrcpus();
int me = smp_processor_id();
/* Setup IPI */
leon_ipi_init();
printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me,
(unsigned int)nrcpu, (unsigned int)NR_CPUS,
(unsigned int)&(leon3_irqctrl_regs->mpstatus));
leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
leon_enable_irq_cpu(leon_ipi_irq, me);
leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
leon_configure_cache_smp();
local_ops->cache_all();
}
int leon_boot_one_cpu(int i, struct task_struct *idle)
{
int timeout;
current_set[i] = task_thread_info(idle);
/* See trampoline.S:leon_smp_cpu_startup for details...
	 * Initialize the contexts table.
	 * Since the call to prom_startcpu() trashes the structure,
	 * we need to re-initialize it for each cpu.
*/
smp_penguin_ctable.which_io = 0;
smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys;
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
(unsigned int)&leon3_irqctrl_regs->mpstatus);
local_ops->cache_all();
	/* Make sure all IRQs are off from the start for this new CPU */
LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
/* Wake one CPU */
LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);
/* wheee... it's going... */
for (timeout = 0; timeout < 10000; timeout++) {
if (cpu_callin_map[i])
break;
udelay(200);
}
printk(KERN_INFO "Started CPU %d\n", (unsigned int)i);
if (!(cpu_callin_map[i])) {
printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
} else {
leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i);
leon_enable_irq_cpu(LEON3_IRQ_TICKER, i);
leon_enable_irq_cpu(leon_ipi_irq, i);
}
local_ops->cache_all();
return 0;
}
void __init leon_smp_done(void)
{
int i, first;
int *prev;
/* setup cpu list for irq rotation */
first = 0;
prev = &first;
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i)) {
*prev = i;
prev = &cpu_data(i).next;
}
}
*prev = first;
local_ops->cache_all();
/* Free unneeded trap tables */
if (!cpu_present(1)) {
free_reserved_page(virt_to_page(&trapbase_cpu1));
}
if (!cpu_present(2)) {
free_reserved_page(virt_to_page(&trapbase_cpu2));
}
if (!cpu_present(3)) {
free_reserved_page(virt_to_page(&trapbase_cpu3));
}
/* Ok, they are spinning and ready to go. */
smp_processors_ready = 1;
}
struct leon_ipi_work {
int single;
int msk;
int resched;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work);
/* Initialize IPIs on the LEON, in order to save IRQ resources only one IRQ
* is used for all three types of IPIs.
*/
static void __init leon_ipi_init(void)
{
int cpu, len;
struct leon_ipi_work *work;
struct property *pp;
struct device_node *rootnp;
struct tt_entry *trap_table;
unsigned long flags;
/* Find IPI IRQ or stick with default value */
rootnp = of_find_node_by_path("/ambapp0");
if (rootnp) {
pp = of_find_property(rootnp, "ipi_num", &len);
if (pp && (*(int *)pp->value))
leon_ipi_irq = *(int *)pp->value;
}
printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq);
/* Adjust so that we jump directly to smpleon_ipi */
local_irq_save(flags);
trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
trap_table->inst_three += smpleon_ipi - real_irq_entry;
local_ops->cache_all();
local_irq_restore(flags);
for_each_possible_cpu(cpu) {
work = &per_cpu(leon_ipi_work, cpu);
work->single = work->msk = work->resched = 0;
}
}
static void leon_send_ipi(int cpu, int level)
{
unsigned long mask;
mask = leon_get_irqmask(level);
LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
}
static void leon_ipi_single(int cpu)
{
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
/* Mark work */
work->single = 1;
/* Generate IRQ on the CPU */
leon_send_ipi(cpu, leon_ipi_irq);
}
static void leon_ipi_mask_one(int cpu)
{
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
/* Mark work */
work->msk = 1;
/* Generate IRQ on the CPU */
leon_send_ipi(cpu, leon_ipi_irq);
}
static void leon_ipi_resched(int cpu)
{
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
/* Mark work */
work->resched = 1;
/* Generate IRQ on the CPU (any IRQ will cause resched) */
leon_send_ipi(cpu, leon_ipi_irq);
}
void leonsmp_ipi_interrupt(void)
{
struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
if (work->single) {
work->single = 0;
smp_call_function_single_interrupt();
}
if (work->msk) {
work->msk = 0;
smp_call_function_interrupt();
}
if (work->resched) {
work->resched = 0;
smp_resched_interrupt();
}
}
static struct smp_funcall {
void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
static void leon_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
if (smp_processors_ready) {
register int high = NR_CPUS - 1;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
{
/* If you make changes here, make sure gcc generates proper code... */
register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
register unsigned long a4 asm("i4") = arg4;
register unsigned long a5 asm("i5") = 0;
__asm__ __volatile__("std %0, [%6]\n\t"
"std %2, [%6 + 8]\n\t"
"std %4, [%6 + 16]\n\t" : :
"r"(f), "r"(a1), "r"(a2), "r"(a3),
"r"(a4), "r"(a5),
"r"(&ccall_info.func));
}
/* Init receive/complete mapping, plus fire the IPI's off. */
{
register int i;
cpumask_clear_cpu(smp_processor_id(), &mask);
cpumask_and(&mask, cpu_online_mask, &mask);
for (i = 0; i <= high; i++) {
if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
leon_send_ipi(i, LEON3_IRQ_CROSS_CALL);
}
}
}
{
register int i;
i = 0;
do {
if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_in[i])
barrier();
} while (++i <= high);
i = 0;
do {
if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_out[i])
barrier();
} while (++i <= high);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
}
}
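/* Editor's note: the two spin loops above form a handshake with
 * leon_cross_call_irq() below: the caller first waits until every targeted
 * CPU has marked processors_in[] (meaning the remote handler has picked up
 * the argument block), then until processors_out[] shows the remote
 * function has finished, before dropping cross_call_lock.
 */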
/* Running cross calls. */
void leon_cross_call_irq(void)
{
void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
static const struct sparc32_ipi_ops leon_ipi_ops = {
.cross_call = leon_cross_call,
.resched = leon_ipi_resched,
.single = leon_ipi_single,
.mask_one = leon_ipi_mask_one,
};
void __init leon_init_smp(void)
{
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m);
sparc32_ipi_ops = &leon_ipi_ops;
}
| linux-master | arch/sparc/kernel/leon_smp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* User-space Probes (UProbes) for sparc
*
* Copyright (C) 2013 Oracle Inc.
*
* Authors:
* Jose E. Marchesi <[email protected]>
* Eric Saint Etienne <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>
#include <linux/sched.h> /* For struct task_struct */
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
/* Compute the address of the breakpoint instruction and return it.
*
* Note that uprobe_get_swbp_addr is defined as a weak symbol in
 * kernel/events/uprobes.c.
*/
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
}
static void copy_to_page(struct page *page, unsigned long vaddr,
const void *src, int len)
{
void *kaddr = kmap_atomic(page);
memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
kunmap_atomic(kaddr);
}
/* Fill in the xol area with the probed instruction followed by the
* single-step trap. Some fixups in the copied instruction are
* performed at this point.
*
 * Note that arch_uprobe_copy_ixol is defined as a weak symbol in
 * kernel/events/uprobes.c.
*/
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
const u32 stp_insn = UPROBE_STP_INSN;
u32 insn = *(u32 *) src;
/* Branches annulling their delay slot must be fixed to not do
* so. Clearing the annul bit on these instructions we can be
* sure the single-step breakpoint in the XOL slot will be
* executed.
*/
u32 op = (insn >> 30) & 0x3;
u32 op2 = (insn >> 22) & 0x7;
if (op == 0 &&
(op2 == 1 || op2 == 2 || op2 == 3 || op2 == 5 || op2 == 6) &&
(insn & ANNUL_BIT) == ANNUL_BIT)
insn &= ~ANNUL_BIT;
copy_to_page(page, vaddr, &insn, len);
copy_to_page(page, vaddr+len, &stp_insn, 4);
}
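/* Editor's note: a worked example of the annul fix-up above.  "ba,a <disp>"
 * encodes as op=00, a=1, cond=1000, op2=010, i.e. 0x30800000 | disp22;
 * clearing ANNUL_BIT (bit 29) turns it into a plain "ba" (0x10800000 |
 * disp22), so the single-step trap placed in its delay slot within the XOL
 * area is guaranteed to execute rather than being annulled away.
 */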
/* Instruction analysis/validity.
*
* This function returns 0 on success or a -ve number on error.
*/
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
struct mm_struct *mm, unsigned long addr)
{
/* Any unsupported instruction? Then return -EINVAL */
return 0;
}
/* If INSN is a relative control transfer instruction, return the
* corrected branch destination value.
*
* Note that regs->tpc and regs->tnpc still hold the values of the
* program counters at the time of the single-step trap due to the
* execution of the UPROBE_STP_INSN at utask->xol_vaddr + 4.
*
*/
static unsigned long relbranch_fixup(u32 insn, struct uprobe_task *utask,
struct pt_regs *regs)
{
/* Branch not taken, no mods necessary. */
if (regs->tnpc == regs->tpc + 0x4UL)
return utask->autask.saved_tnpc + 0x4UL;
/* The three cases are call, branch w/prediction,
* and traditional branch.
*/
if ((insn & 0xc0000000) == 0x40000000 ||
(insn & 0xc1c00000) == 0x00400000 ||
(insn & 0xc1c00000) == 0x00800000) {
unsigned long real_pc = (unsigned long) utask->vaddr;
unsigned long ixol_addr = utask->xol_vaddr;
/* The instruction did all the work for us
* already, just apply the offset to the correct
* instruction location.
*/
return (real_pc + (regs->tnpc - ixol_addr));
}
/* It is jmpl or some other absolute PC modification instruction,
* leave NPC as-is.
*/
return regs->tnpc;
}
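/* Editor's note: the three opcode masks above select, in order, CALL
 * (op=01, mask 0xc0000000), BPcc "branch with prediction" (op=00, op2=001,
 * mask 0xc1c00000) and the traditional Bicc (op=00, op2=010).  Anything
 * else modifies the PC absolutely (e.g. jmpl), so the trapped tnpc is
 * already the correct resume address.
 */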
/* If INSN is an instruction which writes its PC location
* into a destination register, fix that up.
*/
static int retpc_fixup(struct pt_regs *regs, u32 insn,
unsigned long real_pc)
{
unsigned long *slot = NULL;
int rc = 0;
/* Simplest case is 'call', which always uses %o7 */
if ((insn & 0xc0000000) == 0x40000000)
slot = ®s->u_regs[UREG_I7];
/* 'jmpl' encodes the register inside of the opcode */
if ((insn & 0xc1f80000) == 0x81c00000) {
unsigned long rd = ((insn >> 25) & 0x1f);
if (rd <= 15) {
slot = ®s->u_regs[rd];
} else {
unsigned long fp = regs->u_regs[UREG_FP];
/* Hard case, it goes onto the stack. */
flushw_all();
rd -= 16;
if (test_thread_64bit_stack(fp)) {
unsigned long __user *uslot =
(unsigned long __user *) (fp + STACK_BIAS) + rd;
rc = __put_user(real_pc, uslot);
} else {
unsigned int __user *uslot = (unsigned int
__user *) fp + rd;
rc = __put_user((u32) real_pc, uslot);
}
}
}
if (slot != NULL)
*slot = real_pc;
return rc;
}
/* Single-stepping can be avoided for certain instructions: NOPs and
* instructions that can be emulated. This function determines
* whether the instruction where the uprobe is installed falls in one
* of these cases and emulates it.
*
* This function returns true if the single-stepping can be skipped,
* false otherwise.
*/
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
/* We currently only emulate NOP instructions.
*/
if (auprobe->ixol == (1 << 24)) {
regs->tnpc += 4;
regs->tpc += 4;
return true;
}
return false;
}
/* Prepare to execute out of line. At this point
* current->utask->xol_vaddr points to an allocated XOL slot properly
* initialized with the original instruction and the single-stepping
* trap instruction.
*
* This function returns 0 on success, any other number on error.
*/
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
	struct arch_uprobe_task *autask = &current->utask->autask;
/* Save the current program counters so they can be restored
* later.
*/
autask->saved_tpc = regs->tpc;
autask->saved_tnpc = regs->tnpc;
/* Adjust PC and NPC so the first instruction in the XOL slot
* will be executed by the user task.
*/
instruction_pointer_set(regs, utask->xol_vaddr);
return 0;
}
/* Prepare to resume execution after the single-step. Called after
* single-stepping. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction.
*
* This function returns 0 on success, any other number on error.
*/
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
struct arch_uprobe_task *autask = &utask->autask;
u32 insn = auprobe->ixol;
int rc = 0;
if (utask->state == UTASK_SSTEP_ACK) {
regs->tnpc = relbranch_fixup(insn, utask, regs);
regs->tpc = autask->saved_tnpc;
rc = retpc_fixup(regs, insn, (unsigned long) utask->vaddr);
} else {
regs->tnpc = utask->vaddr+4;
regs->tpc = autask->saved_tnpc+4;
}
return rc;
}
/* Handler for uprobe traps. This is called from the traps table and
* triggers the proper die notification.
*/
asmlinkage void uprobe_trap(struct pt_regs *regs,
unsigned long trap_level)
{
BUG_ON(trap_level != 0x173 && trap_level != 0x174);
/* We are only interested in user-mode code. Uprobe traps
* shall not be present in kernel code.
*/
if (!user_mode(regs)) {
local_irq_enable();
bad_trap(regs, trap_level);
return;
}
/* trap_level == 0x173 --> ta 0x73
* trap_level == 0x174 --> ta 0x74
*/
if (notify_die((trap_level == 0x173) ? DIE_BPT : DIE_SSTEP,
(trap_level == 0x173) ? "bpt" : "sstep",
regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
bad_trap(regs, trap_level);
}
/* Callback routine for handling die notifications.
*/
int arch_uprobe_exception_notify(struct notifier_block *self,
unsigned long val, void *data)
{
int ret = NOTIFY_DONE;
struct die_args *args = (struct die_args *)data;
/* We are only interested in userspace traps */
if (args->regs && !user_mode(args->regs))
return NOTIFY_DONE;
switch (val) {
case DIE_BPT:
if (uprobe_pre_sstep_notifier(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(args->regs))
ret = NOTIFY_STOP;
default:
break;
}
return ret;
}
/* This function gets called when a XOL instruction either gets
* trapped or the thread has a fatal signal, so reset the instruction
* pointer to its probed address.
*/
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
instruction_pointer_set(regs, utask->vaddr);
}
/* If xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a single-stepped
* instruction jumps back to its own address.
*/
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
return false;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
struct pt_regs *regs)
{
unsigned long orig_ret_vaddr = regs->u_regs[UREG_I7];
regs->u_regs[UREG_I7] = trampoline_vaddr-8;
return orig_ret_vaddr + 8;
}
| linux-master | arch/sparc/kernel/uprobes.c |
// SPDX-License-Identifier: GPL-2.0
/* vio.c: Virtual I/O channel devices probing infrastructure.
*
* Copyright (c) 2003-2005 IBM Corp.
* Dave Engebretsen [email protected]
* Santiago Leon [email protected]
* Hollis Blanchard <[email protected]>
* Stephen Rothwell
*
* Adapted to sparc64 by David S. Miller [email protected]
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/init.h>
#include <asm/mdesc.h>
#include <asm/vio.h>
static const struct vio_device_id *vio_match_device(
const struct vio_device_id *matches,
const struct vio_dev *dev)
{
const char *type, *compat;
int len;
type = dev->type;
compat = dev->compat;
len = dev->compat_len;
while (matches->type[0] || matches->compat[0]) {
int match = 1;
if (matches->type[0])
match &= !strcmp(matches->type, type);
if (matches->compat[0]) {
match &= len &&
of_find_in_proplist(compat, matches->compat, len);
}
if (match)
return matches;
matches++;
}
return NULL;
}
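/* Editor's note: an illustrative, hypothetical match table (kept under
 * #if 0, never compiled).  Client drivers terminate their vio_device_id
 * array with an all-empty entry, which is what ends the while loop in
 * vio_match_device() above.
 */
#if 0
static const struct vio_device_id example_vio_ids[] = {
	{ .type = "vdc-port" },		/* match on device-type alone */
	{ },				/* terminator: empty type and compat */
};
#endif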
static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
return 0;
}
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
struct vio_dev *vio_dev = to_vio_dev(dev);
struct vio_driver *vio_drv = to_vio_driver(drv);
const struct vio_device_id *matches = vio_drv->id_table;
if (!matches)
return 0;
return vio_match_device(matches, vio_dev) != NULL;
}
static int vio_device_probe(struct device *dev)
{
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
const struct vio_device_id *id;
if (!drv->probe)
return -ENODEV;
id = vio_match_device(drv->id_table, vdev);
if (!id)
return -ENODEV;
/* alloc irqs (unless the driver specified not to) */
if (!drv->no_irq) {
if (vdev->tx_irq == 0 && vdev->tx_ino != ~0UL)
vdev->tx_irq = sun4v_build_virq(vdev->cdev_handle,
vdev->tx_ino);
if (vdev->rx_irq == 0 && vdev->rx_ino != ~0UL)
vdev->rx_irq = sun4v_build_virq(vdev->cdev_handle,
vdev->rx_ino);
}
return drv->probe(vdev, id);
}
static void vio_device_remove(struct device *dev)
{
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
if (drv->remove) {
/*
* Ideally, we would remove/deallocate tx/rx virqs
		 * here - however, there are currently no support
		 * routines to do so. TBD
*/
drv->remove(vdev);
}
}
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vio_dev *vdev = to_vio_dev(dev);
const char *str = "none";
if (!strcmp(vdev->type, "vnet-port"))
str = "vnet";
else if (!strcmp(vdev->type, "vdc-port"))
str = "vdisk";
return sprintf(buf, "%s\n", str);
}
static DEVICE_ATTR_RO(devspec);
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vio_dev *vdev = to_vio_dev(dev);
return sprintf(buf, "%s\n", vdev->type);
}
static DEVICE_ATTR_RO(type);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const struct vio_dev *vdev = to_vio_dev(dev);
return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *vio_dev_attrs[] = {
&dev_attr_devspec.attr,
&dev_attr_type.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(vio_dev);
static struct bus_type vio_bus_type = {
.name = "vio",
.dev_groups = vio_dev_groups,
.uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_device_probe,
.remove = vio_device_remove,
};
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
const char *mod_name)
{
viodrv->driver.bus = &vio_bus_type;
viodrv->driver.name = viodrv->name;
viodrv->driver.owner = owner;
viodrv->driver.mod_name = mod_name;
return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
void vio_unregister_driver(struct vio_driver *viodrv)
{
driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
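/* Editor's note: a minimal, hypothetical client-driver skeleton (kept under
 * #if 0, never compiled) pairing the two exports above.  It assumes the
 * vio_register_driver() convenience macro from asm/vio.h, which is expected
 * to forward to __vio_register_driver() with THIS_MODULE/KBUILD_MODNAME,
 * and reuses the example_vio_ids table sketched earlier.
 */
#if 0
static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	return 0;		/* claim the device */
}

static struct vio_driver example_driver = {
	.id_table	= example_vio_ids,
	.probe		= example_probe,
	.name		= "example",
};

/* Module init/exit would then call vio_register_driver(&example_driver)
 * and vio_unregister_driver(&example_driver) respectively.
 */
#endif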
static void vio_dev_release(struct device *dev)
{
kfree(to_vio_dev(dev));
}
static ssize_t
show_pciobppath_attr(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct vio_dev *vdev;
struct device_node *dp;
vdev = to_vio_dev(dev);
dp = vdev->dp;
return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH,
show_pciobppath_attr, NULL);
static struct device_node *cdev_node;
static struct vio_dev *root_vdev;
static u64 cdev_cfg_handle;
static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node)
{
const u64 *cfg_handle = NULL;
u64 a;
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
u64 target;
target = mdesc_arc_target(hp, a);
cfg_handle = mdesc_get_property(hp, target,
"cfg-handle", NULL);
if (cfg_handle)
break;
}
return cfg_handle;
}
/**
* vio_vdev_node() - Find VDEV node in MD
* @hp: Handle to the MD
* @vdev: Pointer to VDEV
*
* Find the node in the current MD which matches the given vio_dev. This
* must be done dynamically since the node value can change if the MD
* is updated.
*
* NOTE: the MD must be locked, using mdesc_grab(), when calling this routine
*
* Return: The VDEV node in MDESC
*/
u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev)
{
u64 node;
if (vdev == NULL)
return MDESC_NODE_NULL;
node = mdesc_get_node(hp, (const char *)vdev->node_name,
&vdev->md_node_info);
return node;
}
EXPORT_SYMBOL(vio_vdev_node);
static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
struct vio_dev *vdev)
{
u64 a;
vdev->tx_ino = ~0UL;
vdev->rx_ino = ~0UL;
vdev->channel_id = ~0UL;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
const u64 *chan_id;
const u64 *irq;
u64 target;
target = mdesc_arc_target(hp, a);
irq = mdesc_get_property(hp, target, "tx-ino", NULL);
if (irq)
vdev->tx_ino = *irq;
irq = mdesc_get_property(hp, target, "rx-ino", NULL);
if (irq)
vdev->rx_ino = *irq;
chan_id = mdesc_get_property(hp, target, "id", NULL);
if (chan_id)
vdev->channel_id = *chan_id;
}
vdev->cdev_handle = cdev_cfg_handle;
}
int vio_set_intr(unsigned long dev_ino, int state)
{
int err;
err = sun4v_vintr_set_valid(cdev_cfg_handle, dev_ino, state);
return err;
}
EXPORT_SYMBOL(vio_set_intr);
static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
const char *node_name,
struct device *parent)
{
const char *type, *compat;
struct device_node *dp;
struct vio_dev *vdev;
int err, tlen, clen;
const u64 *id, *cfg_handle;
type = mdesc_get_property(hp, mp, "device-type", &tlen);
if (!type) {
type = mdesc_get_property(hp, mp, "name", &tlen);
if (!type) {
type = mdesc_node_name(hp, mp);
tlen = strlen(type) + 1;
}
}
if (tlen > VIO_MAX_TYPE_LEN || strlen(type) >= VIO_MAX_TYPE_LEN) {
printk(KERN_ERR "VIO: Type string [%s] is too long.\n",
type);
return NULL;
}
id = mdesc_get_property(hp, mp, "id", NULL);
cfg_handle = vio_cfg_handle(hp, mp);
compat = mdesc_get_property(hp, mp, "device-type", &clen);
if (!compat) {
clen = 0;
} else if (clen > VIO_MAX_COMPAT_LEN) {
printk(KERN_ERR "VIO: Compat len %d for [%s] is too long.\n",
clen, type);
return NULL;
}
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
printk(KERN_ERR "VIO: Could not allocate vio_dev\n");
return NULL;
}
vdev->mp = mp;
memcpy(vdev->type, type, tlen);
if (compat)
memcpy(vdev->compat, compat, clen);
else
memset(vdev->compat, 0, sizeof(vdev->compat));
vdev->compat_len = clen;
vdev->port_id = ~0UL;
vdev->tx_irq = 0;
vdev->rx_irq = 0;
vio_fill_channel_info(hp, mp, vdev);
if (!id) {
dev_set_name(&vdev->dev, "%s", type);
vdev->dev_no = ~(u64)0;
} else if (!cfg_handle) {
dev_set_name(&vdev->dev, "%s-%llu", type, *id);
vdev->dev_no = *id;
} else {
dev_set_name(&vdev->dev, "%s-%llu-%llu", type,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
vdev->port_id = *id;
}
vdev->dev.parent = parent;
vdev->dev.bus = &vio_bus_type;
vdev->dev.release = vio_dev_release;
if (parent == NULL) {
dp = cdev_node;
} else if (to_vio_dev(parent) == root_vdev) {
for_each_child_of_node(cdev_node, dp) {
if (of_node_is_type(dp, type))
break;
}
} else {
dp = to_vio_dev(parent)->dp;
}
vdev->dp = dp;
/*
* node_name is NULL for the parent/channel-devices node and
* the parent doesn't require the MD node info.
*/
if (node_name != NULL) {
(void) snprintf(vdev->node_name, VIO_MAX_NAME_LEN, "%s",
node_name);
err = mdesc_get_node_info(hp, mp, node_name,
&vdev->md_node_info);
if (err) {
pr_err("VIO: Could not get MD node info %s, err=%d\n",
dev_name(&vdev->dev), err);
kfree(vdev);
return NULL;
}
}
pr_info("VIO: Adding device %s (tx_ino = %llx, rx_ino = %llx)\n",
dev_name(&vdev->dev), vdev->tx_ino, vdev->rx_ino);
err = device_register(&vdev->dev);
if (err) {
printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
dev_name(&vdev->dev), err);
put_device(&vdev->dev);
return NULL;
}
if (vdev->dp)
err = sysfs_create_file(&vdev->dev.kobj,
&dev_attr_obppath.attr);
return vdev;
}
static void vio_add(struct mdesc_handle *hp, u64 node,
const char *node_name)
{
(void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
struct vio_remove_node_data {
struct mdesc_handle *hp;
u64 node;
};
static int vio_md_node_match(struct device *dev, void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_remove_node_data *node_data;
u64 node;
node_data = (struct vio_remove_node_data *)arg;
node = vio_vdev_node(node_data->hp, vdev);
if (node == node_data->node)
return 1;
else
return 0;
}
static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name)
{
struct vio_remove_node_data node_data;
struct device *dev;
node_data.hp = hp;
node_data.node = node;
dev = device_find_child(&root_vdev->dev, (void *)&node_data,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
} else {
pr_err("VIO: %s node not found in MDESC\n", node_name);
}
}
static struct mdesc_notifier_client vio_device_notifier = {
.add = vio_add,
.remove = vio_remove,
.node_name = "virtual-device-port",
};
/* We are only interested in domain service ports under the
* "domain-services" node. On control nodes there is another port
* under "openboot" that we should not mess with as aparently that is
* reserved exclusively for OBP use.
*/
static void vio_add_ds(struct mdesc_handle *hp, u64 node,
const char *node_name)
{
int found;
u64 a;
found = 0;
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
u64 target = mdesc_arc_target(hp, a);
const char *name = mdesc_node_name(hp, target);
if (!strcmp(name, "domain-services")) {
found = 1;
break;
}
}
if (found)
(void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
static struct mdesc_notifier_client vio_ds_notifier = {
.add = vio_add_ds,
.remove = vio_remove,
.node_name = "domain-services-port",
};
static const char *channel_devices_node = "channel-devices";
static const char *channel_devices_compat = "SUNW,sun4v-channel-devices";
static const char *cfg_handle_prop = "cfg-handle";
static int __init vio_init(void)
{
struct mdesc_handle *hp;
const char *compat;
const u64 *cfg_handle;
int err, len;
u64 root;
err = bus_register(&vio_bus_type);
if (err) {
printk(KERN_ERR "VIO: Could not register bus type err=%d\n",
err);
return err;
}
hp = mdesc_grab();
if (!hp)
return 0;
root = mdesc_node_by_name(hp, MDESC_NODE_NULL, channel_devices_node);
if (root == MDESC_NODE_NULL) {
printk(KERN_INFO "VIO: No channel-devices MDESC node.\n");
mdesc_release(hp);
return 0;
}
cdev_node = of_find_node_by_name(NULL, "channel-devices");
err = -ENODEV;
if (!cdev_node) {
printk(KERN_INFO "VIO: No channel-devices OBP node.\n");
goto out_release;
}
compat = mdesc_get_property(hp, root, "compatible", &len);
if (!compat) {
printk(KERN_ERR "VIO: Channel devices lacks compatible "
"property\n");
goto out_release;
}
if (!of_find_in_proplist(compat, channel_devices_compat, len)) {
printk(KERN_ERR "VIO: Channel devices node lacks (%s) "
"compat entry.\n", channel_devices_compat);
goto out_release;
}
cfg_handle = mdesc_get_property(hp, root, cfg_handle_prop, NULL);
if (!cfg_handle) {
printk(KERN_ERR "VIO: Channel devices lacks %s property\n",
cfg_handle_prop);
goto out_release;
}
cdev_cfg_handle = *cfg_handle;
root_vdev = vio_create_one(hp, root, NULL, NULL);
err = -ENODEV;
if (!root_vdev) {
printk(KERN_ERR "VIO: Could not create root device.\n");
goto out_release;
}
mdesc_register_notifier(&vio_device_notifier);
mdesc_register_notifier(&vio_ds_notifier);
mdesc_release(hp);
return err;
out_release:
mdesc_release(hp);
return err;
}
postcore_initcall(vio_init);
| linux-master | arch/sparc/kernel/vio.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ftrace.h>
#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include "kstack.h"
static void __save_stack_trace(struct thread_info *tp,
struct stack_trace *trace,
bool skip_sched)
{
unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
struct task_struct *t;
int graph = 0;
#endif
if (tp == current_thread_info()) {
stack_trace_flush();
__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
} else {
ksp = tp->ksp;
}
fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
t = tp->task;
#endif
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
unsigned long pc;
if (!kstack_valid(tp, fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
if (kstack_is_trap_frame(tp, regs)) {
if (!(regs->tstate & TSTATE_PRIV))
break;
pc = regs->tpc;
fp = regs->u_regs[UREG_I6] + STACK_BIAS;
} else {
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
if (trace->skip > 0)
trace->skip--;
else if (!skip_sched || !in_sched_functions(pc)) {
trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
struct ftrace_ret_stack *ret_stack;
ret_stack = ftrace_graph_get_ret_stack(t,
graph);
if (ret_stack) {
pc = ret_stack->ret;
if (trace->nr_entries <
trace->max_entries)
trace->entries[trace->nr_entries++] = pc;
graph++;
}
}
#endif
}
} while (trace->nr_entries < trace->max_entries);
}
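/* Editor's note: the walker above follows the usual sparc64 convention of
 * biased frame pointers - STACK_BIAS (2047) is added to %fp to recover the
 * real address - and kstack_is_trap_frame() decides whether the next hop
 * comes from a saved pt_regs (use tpc and %i6) or a plain stack frame (use
 * callers_pc and fp).
 */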
void save_stack_trace(struct stack_trace *trace)
{
__save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
struct thread_info *tp = task_thread_info(tsk);
__save_stack_trace(tp, trace, true);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
| linux-master | arch/sparc/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
* Copyright (C) 1996,2008 David S. Miller ([email protected])
* Copyright (C) 1996,1997 Jakub Jelinek ([email protected])
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include "entry.h"
#include "kernel.h"
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
both, /* Swap, ldstub, cas, ... */
fpld,
fpst,
invalid,
};
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
if (!tmp)
return load;
else {
switch ((insn>>19)&0xf) {
case 15: /* swap* */
return both;
default:
return store;
}
}
}
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
unsigned int tmp;
tmp = ((insn >> 19) & 0xf);
if (tmp == 11 || tmp == 14) /* ldx/stx */
return 8;
tmp &= 3;
if (!tmp)
return 4;
else if (tmp == 3)
return 16; /* ldd/std - Although it is actually 8 */
else if (tmp == 2)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
die_if_kernel("Byte sized unaligned access?!?!", regs);
/* GCC should never warn that control reaches the end
* of this function without returning a value because
* die_if_kernel() is marked with attribute 'noreturn'.
* Alas, some versions do...
*/
return 0;
}
}
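/* Editor's note: a worked decode tying the two helpers above together.
 * "ldx [%g1 + 8], %g2" assembles to 0xc4586008: (insn >> 21) & 1 is 0, so
 * decode_direction() says "load", and (insn >> 19) & 0xf is 0xb (the low
 * bits of the ldx op3), so decode_access_size() returns 8.
 */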
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
if (insn & 0x800000) {
if (insn & 0x2000)
return (unsigned char)(regs->tstate >> 24); /* %asi */
else
return (unsigned char)(insn >> 5); /* imm_asi */
} else
return ASI_P;
}
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
return (insn & 0x400000);
}
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd, int from_kernel)
{
if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if (from_kernel != 0)
__asm__ __volatile__("flushw");
else
flushw_user();
}
}
static inline long sign_extend_imm13(long imm)
{
return imm << 51 >> 51;
}
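/* Editor's note: the double shift above sign-extends the 13-bit immediate
 * through bit 12 using 64-bit longs: 0x0fff stays 4095, while 0x1000
 * becomes -4096 and 0x1fff becomes -1.
 */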
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value, fp;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
fp = regs->u_regs[UREG_FP];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(fp + STACK_BIAS);
value = win->locals[reg - 16];
} else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(fp + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
}
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
unsigned long fp;
if (reg < 16)
return ®s->u_regs[reg];
fp = regs->u_regs[UREG_FP];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
} else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
return (unsigned long *)&win32->locals[reg - 16];
} else {
struct reg_window *win;
win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
}
}
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
unsigned long addr;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
if (!from_kernel && test_thread_flag(TIF_32BIT))
addr &= 0xffffffff;
return addr;
}
/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
die_if_kernel(str, regs);
}
extern int do_int_load(unsigned long *dest_reg, int size,
unsigned long *saddr, int is_signed, int asi);
extern int __do_int_store(unsigned long *dst_addr, int size,
unsigned long src_val, int asi);
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
struct pt_regs *regs, int asi, int orig_asi)
{
unsigned long zero = 0;
unsigned long *src_val_p = &zero;
unsigned long src_val;
if (size == 16) {
size = 8;
zero = (((long)(reg_num ?
(unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
(unsigned int)fetch_reg(reg_num + 1, regs);
} else if (reg_num) {
src_val_p = fetch_reg_addr(reg_num, regs);
}
src_val = *src_val_p;
if (unlikely(asi != orig_asi)) {
switch (size) {
case 2:
src_val = swab16(src_val);
break;
case 4:
src_val = swab32(src_val);
break;
case 8:
src_val = swab64(src_val);
break;
case 16:
default:
BUG();
break;
}
}
return __do_int_store(dst_addr, size, src_val, asi);
}
static inline void advance(struct pt_regs *regs)
{
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
}
static inline int floating_point_load_or_store_p(unsigned int insn)
{
return (insn >> 24) & 1;
}
static inline int ok_for_kernel(unsigned int insn)
{
return !floating_point_load_or_store_p(insn);
}
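/* The emulation itself faulted. Use the exception table fixup for
* the trapping PC if there is one, otherwise die; fixup_tstate_asi
* additionally resets %asi to ASI_AIUS for {get,put}_user() style
* accessors.
*/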
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
struct pt_regs *regs = current_thread_info()->kern_una_regs;
unsigned int insn = current_thread_info()->kern_una_insn;
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (!entry) {
unsigned long address;
address = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
if (address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference in mna handler");
} else
printk(KERN_ALERT "Unable to handle kernel paging "
"request in mna handler");
printk(KERN_ALERT " at virtual address %016lx\n",address);
printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
(current->mm ? CTX_HWBITS(current->mm->context) :
CTX_HWBITS(current->active_mm->context)));
printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
/* Not reached */
}
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
if (fixup_tstate_asi) {
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= (ASI_AIUS << 24UL);
}
}
static void log_unaligned(struct pt_regs *regs)
{
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
if (__ratelimit(&ratelimit)) {
printk("Kernel unaligned access at TPC[%lx] %pS\n",
regs->tpc, (void *) regs->tpc);
}
}
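/* Main entry for unaligned access traps taken in kernel mode:
* decode the faulting instruction, emulate the load or store, and
* advance past it.
*/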
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
current_thread_info()->kern_una_insn = insn;
orig_asi = asi = decode_asi(insn, regs);
/* If this is a {get,put}_user() on an unaligned userspace pointer,
* just signal a fault and do not log the event.
*/
if (asi == ASI_AIUS) {
kernel_mna_trap_fault(0);
return;
}
log_unaligned(regs);
if (!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel "
"at <%016lx>.\n", regs->tpc);
unaligned_panic("Kernel does fpu/atomic "
"unaligned load/store.", regs);
kernel_mna_trap_fault(0);
} else {
unsigned long addr, *reg_addr;
int err;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
case ASI_AIUSL:
case ASI_PL:
case ASI_SL:
case ASI_PNFL:
case ASI_SNFL:
asi &= ~0x08;
break;
}
switch (dir) {
case load:
reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
err = do_int_load(reg_addr, size,
(unsigned long *) addr,
decode_signedness(insn), asi);
if (likely(!err) && unlikely(asi != orig_asi)) {
unsigned long val_in = *reg_addr;
switch (size) {
case 2:
val_in = swab16(val_in);
break;
case 4:
val_in = swab32(val_in);
break;
case 8:
val_in = swab64(val_in);
break;
case 16:
default:
BUG();
break;
}
*reg_addr = val_in;
}
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs,
asi, orig_asi);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
if (unlikely(err))
kernel_mna_trap_fault(1);
else
advance(regs);
}
}
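/* Emulate the POPC instruction (population count) for CPUs that
* trap on it: count the set bits in the source operand and write
* the result to rd.
*/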
int handle_popc(u32 insn, struct pt_regs *regs)
{
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
int ret, rd = ((insn >> 25) & 0x1f);
u64 value;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
} else {
maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
value = fetch_reg(insn & 0x1f, regs);
}
ret = hweight64(value);
if (rd < 16) {
if (rd)
regs->u_regs[rd] = ret;
} else {
unsigned long fp = regs->u_regs[UREG_FP];
if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
put_user(ret, &win32->locals[rd - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(fp + STACK_BIAS);
put_user(ret, &win->locals[rd - 16]);
}
}
advance(regs);
return 1;
}
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
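/* Emulate trapping FPU memory operations (STQ and LDF/LDDF/LDQF),
* including the privilege checks, no-fault ASI semantics and
* byte-swapping for little-endian ASIs.
*/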
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
unsigned long addr = compute_effective_address(regs, insn, 0);
int freg;
struct fpustate *f = FPUSTATE;
int asi = decode_asi(insn, regs);
int flag;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (insn & 0x200000) {
/* STQ */
u64 first = 0, second = 0;
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if (freg & 3) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
if (asi < 0x80) {
do_privact(regs);
return 1;
}
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
{
/* Need to convert endians */
u64 tmp = __swab64p(&first);
first = __swab64p(&second);
second = tmp;
break;
}
default:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
/* LDF, LDDF, LDQF */
u32 data[4] __attribute__ ((aligned(8)));
int size, i;
int err;
if (asi < 0x80) {
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
case 0x000000: size = 1; break;
case 0x100000: size = 4; break;
default: size = 2; break;
}
if (size == 1)
freg = (insn >> 25) & 0x1f;
else
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
for (i = 0; i < size; i++)
data[i] = 0;
err = get_user (data[0], (u32 __user *) addr);
if (!err) {
for (i = 1; i < size; i++)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
u64 tmp;
switch (size) {
case 1: data[0] = le32_to_cpup(data + 0); break;
default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
break;
case 4: tmp = le64_to_cpup((u64 *)(data + 0));
*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
*(u64 *)(data + 2) = tmp;
break;
}
}
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
current_thread_info()->fpsaved[0] |= flag;
}
advance(regs);
return 1;
}
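/* A no-fault load faulted anyway: the architected behaviour is to
* simply zero the destination register(s).
*/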
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
int rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
} else {
put_user(0, (unsigned long __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, (unsigned long __user *) reg + 1);
}
advance(regs);
}
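/* Emulate a user LDDF whose address is word- but not doubleword-
* aligned by loading the two 32-bit halves separately and merging
* them.
*/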
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
enum ctx_state prev_state = exception_enter();
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
u64 value;
u8 freg;
int flag;
struct fpustate *f = FPUSTATE;
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
int asi = decode_asi(insn, regs);
u32 first, second;
int err;
if ((asi > ASI_SNFL) ||
(asi < ASI_P))
goto daex;
first = second = 0;
err = get_user(first, (u32 __user *)sfar);
if (!err)
err = get_user(second, (u32 __user *)(sfar + 4));
if (err) {
if (!(asi & 0x2))
goto daex;
first = second = 0;
}
save_and_clear_fpu();
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
value = (((u64)first) << 32) | second;
if (asi & 0x8) /* Little */
value = __swab64p(&value);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
*(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag;
} else {
daex:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
goto out;
}
advance(regs);
out:
exception_exit(prev_state);
}
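/* The store analogue of the above: split a misaligned user STDF
* into two 32-bit stores.
*/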
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
enum ctx_state prev_state = exception_enter();
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
u64 value;
u8 freg;
int flag;
struct fpustate *f = FPUSTATE;
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
int asi = decode_asi(insn, regs);
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
value = 0;
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if ((asi > ASI_SNFL) ||
(asi < ASI_P))
goto daex;
save_and_clear_fpu();
if (current_thread_info()->fpsaved[0] & flag)
value = *(u64 *)&f->regs[freg];
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
value = __swab64p(&value); break;
default: goto daex;
}
if (put_user (value >> 32, (u32 __user *) sfar) ||
__put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex;
} else {
daex:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
goto out;
}
advance(regs);
out:
exception_exit(prev_state);
}
| linux-master | arch/sparc/kernel/unaligned_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1995, 2008 David S. Miller ([email protected])
* Copyright (C) 1996 Miguel de Icaza ([email protected])
* Copyright (C) 1997 Eddie C. Dost ([email protected])
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
#include <asm/visasm.h>
#include <asm/switch_to.h>
#include <asm/cacheflush.h>
#include "sigutil.h"
#include "systbls.h"
#include "kernel.h"
#include "entry.h"
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
struct ucontext __user *ucp = (struct ucontext __user *)
regs->u_regs[UREG_I0];
enum ctx_state prev_state = exception_enter();
mc_gregset_t __user *grp;
unsigned long pc, npc, tstate;
unsigned long fp, i7;
unsigned char fenab;
int err;
synchronize_user_stack();
if (get_thread_wsaved() ||
(((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
(!__access_ok(ucp, sizeof(*ucp))))
goto do_sigsegv;
grp = &ucp->uc_mcontext.mc_gregs;
err = __get_user(pc, &((*grp)[MC_PC]));
err |= __get_user(npc, &((*grp)[MC_NPC]));
if (err || ((pc | npc) & 3))
goto do_sigsegv;
if (regs->u_regs[UREG_I1]) {
sigset_t set;
if (_NSIG_WORDS == 1) {
if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
goto do_sigsegv;
} else {
if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
goto do_sigsegv;
}
set_current_blocked(&set);
}
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
npc &= 0xffffffff;
}
regs->tpc = pc;
regs->tnpc = npc;
err |= __get_user(regs->y, &((*grp)[MC_Y]));
err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
/* Skip %g7 as that's the thread register in userspace. */
err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
err |= __put_user(fp,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
err |= __put_user(i7,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
if (fenab) {
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
fprs_write(0);
err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
if (fprs & FPRS_DL)
err |= copy_from_user(fpregs,
&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16,
((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
(sizeof(unsigned int) * 32));
err |= __get_user(current_thread_info()->xfsr[0],
&(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
err |= __get_user(current_thread_info()->gsr[0],
&(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
regs->tstate &= ~TSTATE_PEF;
}
if (err)
goto do_sigsegv;
out:
exception_exit(prev_state);
return;
do_sigsegv:
force_sig(SIGSEGV);
goto out;
}
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
struct ucontext __user *ucp = (struct ucontext __user *)
regs->u_regs[UREG_I0];
enum ctx_state prev_state = exception_enter();
mc_gregset_t __user *grp;
mcontext_t __user *mcp;
unsigned long fp, i7;
unsigned char fenab;
int err;
synchronize_user_stack();
if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
goto do_sigsegv;
#if 1
fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif
mcp = &ucp->uc_mcontext;
grp = &mcp->mc_gregs;
/* Skip over the trap instruction, first. */
if (test_thread_flag(TIF_32BIT)) {
regs->tpc = (regs->tnpc & 0xffffffff);
regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
} else {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
}
err = 0;
if (_NSIG_WORDS == 1)
err |= __put_user(current->blocked.sig[0],
(unsigned long __user *)&ucp->uc_sigmask);
else
err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
sizeof(sigset_t));
err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
err |= __put_user(regs->y, &((*grp)[MC_Y]));
err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
err |= __get_user(fp,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
err |= __get_user(i7,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
err |= __put_user(fp, &(mcp->mc_fp));
err |= __put_user(i7, &(mcp->mc_i7));
err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
if (fenab) {
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
fprs = current_thread_info()->fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(
((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
(sizeof(unsigned int) * 32));
err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
}
if (err)
goto do_sigsegv;
out:
exception_exit(prev_state);
return;
do_sigsegv:
force_sig(SIGSEGV);
goto out;
}
/* Checks if the fp is valid. We always build rt signal frames which
* are 16-byte aligned, therefore we can always enforce that the
* restore frame has that property as well.
*/
static bool invalid_frame_pointer(void __user *fp)
{
if (((unsigned long) fp) & 15)
return true;
return false;
}
struct rt_signal_frame {
struct sparc_stackf ss;
siginfo_t info;
struct pt_regs regs;
__siginfo_fpu_t __user *fpu_save;
stack_t stack;
sigset_t mask;
__siginfo_rwin_t *rwin_save;
};
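/* Called when a signal handler returns: restore the register and
* FPU state that setup_rt_frame() saved on the user stack.
*/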
void do_rt_sigreturn(struct pt_regs *regs)
{
unsigned long tpc, tnpc, tstate, ufp;
struct rt_signal_frame __user *sf;
__siginfo_fpu_t __user *fpu_save;
__siginfo_rwin_t __user *rwin_save;
sigset_t set;
int err;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack ();
sf = (struct rt_signal_frame __user *)
(regs->u_regs [UREG_FP] + STACK_BIAS);
/* 1. Make sure we are not getting garbage from the user */
if (invalid_frame_pointer(sf))
goto segv;
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
goto segv;
if ((ufp + STACK_BIAS) & 0x7)
goto segv;
err = __get_user(tpc, &sf->regs.tpc);
err |= __get_user(tnpc, &sf->regs.tnpc);
if (test_thread_flag(TIF_32BIT)) {
tpc &= 0xffffffff;
tnpc &= 0xffffffff;
}
err |= ((tpc | tnpc) & 3);
/* 2. Restore the state */
err |= __get_user(regs->y, &sf->regs.y);
err |= __get_user(tstate, &sf->regs.tstate);
err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));
/* User can only change condition codes and %asi in %tstate. */
regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
err |= __get_user(fpu_save, &sf->fpu_save);
if (!err && fpu_save)
err |= restore_fpu_state(regs, fpu_save);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
err |= restore_altstack(&sf->stack);
if (err)
goto segv;
err |= __get_user(rwin_save, &sf->rwin_save);
if (!err && rwin_save) {
if (restore_rwin_state(rwin_save))
goto segv;
}
regs->tpc = tpc;
regs->tnpc = tnpc;
/* Prevent syscall restart. */
pt_regs_clear_syscall(regs);
set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV);
}
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
return (void __user *) -1L;
/* This is the X/Open sanctioned signal stack switching. */
sp = sigsp(sp, ksig) - framesize;
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~15UL;
return (void __user *) sp;
}
static inline int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
struct rt_signal_frame __user *sf;
int wsaved, err, sf_size;
void __user *tail;
/* 1. Make sure everything is clean */
synchronize_user_stack();
save_and_clear_fpu();
wsaved = get_thread_wsaved();
sf_size = sizeof(struct rt_signal_frame);
if (current_thread_info()->fpsaved[0] & FPRS_FEF)
sf_size += sizeof(__siginfo_fpu_t);
if (wsaved)
sf_size += sizeof(__siginfo_rwin_t);
sf = (struct rt_signal_frame __user *)
get_sigframe(ksig, regs, sf_size);
if (invalid_frame_pointer (sf)) {
if (show_unhandled_signals)
pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n",
current->comm, current->pid, (unsigned long)sf,
regs->tpc, regs->u_regs[UREG_I7]);
force_sigsegv(ksig->sig);
return -EINVAL;
}
tail = (sf + 1);
/* 2. Save the current process state */
err = copy_to_user(&sf->regs, regs, sizeof (*regs));
if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
__siginfo_fpu_t __user *fpu_save = tail;
tail += sizeof(__siginfo_fpu_t);
err |= save_fpu_state(regs, fpu_save);
err |= __put_user((u64)fpu_save, &sf->fpu_save);
} else {
err |= __put_user(0, &sf->fpu_save);
}
if (wsaved) {
__siginfo_rwin_t __user *rwin_save = tail;
tail += sizeof(__siginfo_rwin_t);
err |= save_rwin_state(wsaved, rwin_save);
err |= __put_user((u64)rwin_save, &sf->rwin_save);
set_thread_wsaved(0);
} else {
err |= __put_user(0, &sf->rwin_save);
}
/* Setup sigaltstack */
err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t));
if (!wsaved) {
err |= raw_copy_in_user((u64 __user *)sf,
(u64 __user *)(regs->u_regs[UREG_FP] +
STACK_BIAS),
sizeof(struct reg_window));
} else {
struct reg_window *rp;
rp = &current_thread_info()->reg_window[wsaved - 1];
err |= copy_to_user(sf, rp, sizeof(struct reg_window));
}
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
err |= copy_siginfo_to_user(&sf->info, &ksig->info);
else {
err |= __put_user(ksig->sig, &sf->info.si_signo);
err |= __put_user(SI_NOINFO, &sf->info.si_code);
}
if (err)
return err;
/* 3. signal handler back-trampoline and parameters */
regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
regs->u_regs[UREG_I0] = ksig->sig;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
/* The sigcontext is passed in this way because of how it
* is defined in GLIBC's /usr/include/bits/sigcontext.h
* for sparc64. It includes the 128 bytes of siginfo_t.
*/
regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
/* 4. signal handler */
regs->tpc = (unsigned long) ksig->ka.sa.sa_handler;
regs->tnpc = (regs->tpc + 4);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
/* 5. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
return 0;
}
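/* Decide how an interrupted system call behaves once the handler
* has been set up: return -EINTR, or rewind the PC so the call is
* reissued, depending on the error code and SA_RESTART.
*/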
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
struct sigaction *sa)
{
switch (regs->u_regs[UREG_I0]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
no_system_call_restart:
regs->u_regs[UREG_I0] = EINTR;
regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
break;
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
}
}
/* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init, even with SIGKILL,
* even by mistake.
*/
static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
{
struct ksignal ksig;
int restart_syscall;
bool has_handler;
/* It's a lot of work and synchronization to add a new ptrace
* register for GDB to save and restore in order to get
* orig_i0 correct for syscall restarts when debugging.
*
* Although it should be the case that most of the global
* registers are volatile across a system call, glibc already
* depends upon the fact that we preserve them. So we can't
* just use any global register to save away the orig_i0 value.
*
* In particular %g2, %g3, %g4, and %g5 are all assumed to be
* preserved across a system call trap by various pieces of
* code in glibc.
*
* %g7 is used as the "thread register". %g6 is not used in
* any fixed manner. %g6 is used as a scratch register and
* a compiler temporary, but its value is never used across
* a system call. Therefore %g6 is usable for orig_i0 storage.
*/
if (pt_regs_is_syscall(regs) &&
(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
regs->u_regs[UREG_G6] = orig_i0;
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
do_signal32(regs);
return;
}
#endif
has_handler = get_signal(&ksig);
restart_syscall = 0;
if (pt_regs_is_syscall(regs) &&
(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
restart_syscall = 1;
orig_i0 = regs->u_regs[UREG_G6];
}
if (has_handler) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ksig.ka.sa);
signal_setup_done(setup_rt_frame(&ksig, regs), &ksig, 0);
} else {
if (restart_syscall) {
switch (regs->u_regs[UREG_I0]) {
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
/* replay the system call when we are done */
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
}
}
restore_saved_sigmask();
}
}
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
user_exit();
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs, orig_i0);
if (thread_info_flags & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
user_enter();
}
/*
* Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
* changes likely come with new fields that should be added below.
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_trapno) == 0x18);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x14);
| linux-master | arch/sparc/kernel/signal_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sun4m SMP support.
*
* Copyright (C) 1996 David S. Miller ([email protected])
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>
#include "irq.h"
#include "kernel.h"
#define IRQ_IPI_SINGLE 12
#define IRQ_IPI_MASK 13
#define IRQ_IPI_RESCHED 14
#define IRQ_CROSS_CALL 15
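/* Atomically exchange *ptr with val using the SPARC swap
* instruction.
*/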
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
__asm__ __volatile__("swap [%1], %0\n\t" :
"=&r" (val), "=&r" (ptr) :
"0" (val), "1" (ptr));
return val;
}
void sun4m_cpu_pre_starting(void *arg)
{
}
void sun4m_cpu_pre_online(void *arg)
{
int cpuid = hard_smp_processor_id();
/* Allow master to continue. The master will then give us the
* go-ahead by setting the smp_commenced_mask and will wait without
* timeouts until our setup is completed fully (signified by
* our bit being set in the cpu_online_mask).
*/
swap_ulong(&cpu_callin_map[cpuid], 1);
/* XXX: What's up with all the flushes? */
local_ops->cache_all();
local_ops->tlb_all();
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t"
: : "r" (¤t_set[cpuid])
: "memory" /* paranoid */);
/* Attach to the address space of init_task. */
mmgrab(&init_mm);
current->active_mm = &init_mm;
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
mb();
}
/*
* Cycle through the processors asking the PROM to start each one.
*/
void __init smp4m_boot_cpus(void)
{
sun4m_unmask_profile_irq();
local_ops->cache_all();
}
int smp4m_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4m_cpu_startup;
int timeout;
int cpu_node;
cpu_find_by_mid(i, &cpu_node);
current_set[i] = task_thread_info(idle);
/* See trampoline.S for details... */
entry += ((i - 1) * 3);
/*
* Initialize the contexts table
* Since the call to prom_startcpu() trashes the structure,
* we need to re-initialize it for each cpu
*/
smp_penguin_ctable.which_io = 0;
smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_ops->cache_all();
prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */
for (timeout = 0; timeout < 10000; timeout++) {
if (cpu_callin_map[i])
break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
local_ops->cache_all();
return 0;
}
void __init smp4m_smp_done(void)
{
int i, first;
int *prev;
/* setup cpu list for irq rotation */
first = 0;
prev = &first;
for_each_online_cpu(i) {
*prev = i;
prev = &cpu_data(i).next;
}
*prev = first;
local_ops->cache_all();
/* Ok, they are spinning and ready to go. */
}
static void sun4m_send_ipi(int cpu, int level)
{
sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}
static void sun4m_ipi_resched(int cpu)
{
sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
}
static void sun4m_ipi_single(int cpu)
{
sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
}
static void sun4m_ipi_mask_one(int cpu)
{
sun4m_send_ipi(cpu, IRQ_IPI_MASK);
}
static struct smp_funcall {
void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */
unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
register int ncpus = SUN4M_NCPUS;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
/* Init function glue. */
ccall_info.func = func;
ccall_info.arg1 = arg1;
ccall_info.arg2 = arg2;
ccall_info.arg3 = arg3;
ccall_info.arg4 = arg4;
ccall_info.arg5 = 0;
/* Init receive/complete mapping, plus fire the IPI's off. */
{
register int i;
cpumask_clear_cpu(smp_processor_id(), &mask);
cpumask_and(&mask, cpu_online_mask, &mask);
for (i = 0; i < ncpus; i++) {
if (cpumask_test_cpu(i, &mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
sun4m_send_ipi(i, IRQ_CROSS_CALL);
} else {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
}
}
}
{
register int i;
i = 0;
do {
if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_in[i])
barrier();
} while (++i < ncpus);
i = 0;
do {
if (!cpumask_test_cpu(i, &mask))
continue;
while (!ccall_info.processors_out[i])
barrier();
} while (++i < ncpus);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
}
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
struct clock_event_device *ce;
int cpu = smp_processor_id();
old_regs = set_irq_regs(regs);
ce = &per_cpu(sparc32_clockevent, cpu);
if (clockevent_state_periodic(ce))
sun4m_clear_profile_irq(cpu);
else
sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
irq_enter();
ce->event_handler(ce);
irq_exit();
set_irq_regs(old_regs);
}
static const struct sparc32_ipi_ops sun4m_ipi_ops = {
.cross_call = sun4m_cross_call,
.resched = sun4m_ipi_resched,
.single = sun4m_ipi_single,
.mask_one = sun4m_ipi_mask_one,
};
void __init sun4m_init_smp(void)
{
sparc32_ipi_ops = &sun4m_ipi_ops;
}
| linux-master | arch/sparc/kernel/sun4m_smp.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ds.c: Domain Services driver for Logical Domains
*
* Copyright (C) 2007, 2008 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/cpu.h>
#include <asm/hypervisor.h>
#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>
#include "kernel.h"
#define DRV_MODULE_NAME "ds"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Jul 11, 2007"
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller ([email protected])");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
struct ds_msg_tag {
__u32 type;
#define DS_INIT_REQ 0x00
#define DS_INIT_ACK 0x01
#define DS_INIT_NACK 0x02
#define DS_REG_REQ 0x03
#define DS_REG_ACK 0x04
#define DS_REG_NACK 0x05
#define DS_UNREG_REQ 0x06
#define DS_UNREG_ACK 0x07
#define DS_UNREG_NACK 0x08
#define DS_DATA 0x09
#define DS_NACK 0x0a
__u32 len;
};
/* Result codes */
#define DS_OK 0x00
#define DS_REG_VER_NACK 0x01
#define DS_REG_DUP 0x02
#define DS_INV_HDL 0x03
#define DS_TYPE_UNKNOWN 0x04
struct ds_version {
__u16 major;
__u16 minor;
};
struct ds_ver_req {
struct ds_msg_tag tag;
struct ds_version ver;
};
struct ds_ver_ack {
struct ds_msg_tag tag;
__u16 minor;
};
struct ds_ver_nack {
struct ds_msg_tag tag;
__u16 major;
};
struct ds_reg_req {
struct ds_msg_tag tag;
__u64 handle;
__u16 major;
__u16 minor;
char svc_id[];
};
struct ds_reg_ack {
struct ds_msg_tag tag;
__u64 handle;
__u16 minor;
};
struct ds_reg_nack {
struct ds_msg_tag tag;
__u64 handle;
__u16 major;
};
struct ds_unreg_req {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_unreg_ack {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_unreg_nack {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_data {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_data_nack {
struct ds_msg_tag tag;
__u64 handle;
__u64 result;
};
struct ds_info;
struct ds_cap_state {
__u64 handle;
void (*data)(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
const char *service_id;
u8 state;
#define CAP_STATE_UNKNOWN 0x00
#define CAP_STATE_REG_SENT 0x01
#define CAP_STATE_REGISTERED 0x02
};
static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static void ds_var_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static struct ds_cap_state ds_states_template[] = {
{
.service_id = "md-update",
.data = md_update_data,
},
{
.service_id = "domain-shutdown",
.data = domain_shutdown_data,
},
{
.service_id = "domain-panic",
.data = domain_panic_data,
},
#ifdef CONFIG_HOTPLUG_CPU
{
.service_id = "dr-cpu",
.data = dr_cpu_data,
},
#endif
{
.service_id = "pri",
.data = ds_pri_data,
},
{
.service_id = "var-config",
.data = ds_var_data,
},
{
.service_id = "var-config-backup",
.data = ds_var_data,
},
};
static DEFINE_SPINLOCK(ds_lock);
struct ds_info {
struct ldc_channel *lp;
u8 hs_state;
#define DS_HS_START 0x01
#define DS_HS_DONE 0x02
u64 id;
void *rcv_buf;
int rcv_buf_len;
struct ds_cap_state *ds_states;
int num_ds_states;
struct ds_info *next;
};
static struct ds_info *ds_info_list;
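/* Service handles are built as (index << 32) | token, so the upper
* 32 bits index straight into the ds_states array.
*/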
static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
{
unsigned int index = handle >> 32;
if (index >= dp->num_ds_states)
return NULL;
return &dp->ds_states[index];
}
static struct ds_cap_state *find_cap_by_string(struct ds_info *dp,
const char *name)
{
int i;
for (i = 0; i < dp->num_ds_states; i++) {
if (strcmp(dp->ds_states[i].service_id, name))
continue;
return &dp->ds_states[i];
}
return NULL;
}
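/* Raw LDC write with a short busy-wait retry on -EAGAIN. Callers
* must hold ds_lock; ds_send() below wraps this with the lock.
*/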
static int __ds_send(struct ldc_channel *lp, void *data, int len)
{
int err, limit = 1000;
err = -EINVAL;
while (limit-- > 0) {
err = ldc_write(lp, data, len);
if (!err || (err != -EAGAIN))
break;
udelay(1);
}
return err;
}
static int ds_send(struct ldc_channel *lp, void *data, int len)
{
unsigned long flags;
int err;
spin_lock_irqsave(&ds_lock, flags);
err = __ds_send(lp, data, len);
spin_unlock_irqrestore(&ds_lock, flags);
return err;
}
struct ds_md_update_req {
__u64 req_num;
};
struct ds_md_update_res {
__u64 req_num;
__u32 result;
};
static void md_update_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ldc_channel *lp = dp->lp;
struct ds_data *dpkt = buf;
struct ds_md_update_req *rp;
struct {
struct ds_data data;
struct ds_md_update_res res;
} pkt;
rp = (struct ds_md_update_req *) (dpkt + 1);
printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id);
mdesc_update();
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt.data.handle = cp->handle;
pkt.res.req_num = rp->req_num;
pkt.res.result = DS_OK;
ds_send(lp, &pkt, sizeof(pkt));
}
struct ds_shutdown_req {
__u64 req_num;
__u32 ms_delay;
};
struct ds_shutdown_res {
__u64 req_num;
__u32 result;
char reason[1];
};
static void domain_shutdown_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ldc_channel *lp = dp->lp;
struct ds_data *dpkt = buf;
struct ds_shutdown_req *rp;
struct {
struct ds_data data;
struct ds_shutdown_res res;
} pkt;
rp = (struct ds_shutdown_req *) (dpkt + 1);
printk(KERN_ALERT "ds-%llu: Shutdown request from "
"LDOM manager received.\n", dp->id);
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt.data.handle = cp->handle;
pkt.res.req_num = rp->req_num;
pkt.res.result = DS_OK;
pkt.res.reason[0] = 0;
ds_send(lp, &pkt, sizeof(pkt));
orderly_poweroff(true);
}
struct ds_panic_req {
__u64 req_num;
};
struct ds_panic_res {
__u64 req_num;
__u32 result;
char reason[1];
};
static void domain_panic_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ldc_channel *lp = dp->lp;
struct ds_data *dpkt = buf;
struct ds_panic_req *rp;
struct {
struct ds_data data;
struct ds_panic_res res;
} pkt;
rp = (struct ds_panic_req *) (dpkt + 1);
printk(KERN_ALERT "ds-%llu: Panic request from "
"LDOM manager received.\n", dp->id);
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt.data.handle = cp->handle;
pkt.res.req_num = rp->req_num;
pkt.res.result = DS_OK;
pkt.res.reason[0] = 0;
ds_send(lp, &pkt, sizeof(pkt));
panic("PANIC requested by LDOM manager.");
}
#ifdef CONFIG_HOTPLUG_CPU
struct dr_cpu_tag {
__u64 req_num;
__u32 type;
#define DR_CPU_CONFIGURE 0x43
#define DR_CPU_UNCONFIGURE 0x55
#define DR_CPU_FORCE_UNCONFIGURE 0x46
#define DR_CPU_STATUS 0x53
/* Responses */
#define DR_CPU_OK 0x6f
#define DR_CPU_ERROR 0x65
__u32 num_records;
};
struct dr_cpu_resp_entry {
__u32 cpu;
__u32 result;
#define DR_CPU_RES_OK 0x00
#define DR_CPU_RES_FAILURE 0x01
#define DR_CPU_RES_BLOCKED 0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING 0x03
#define DR_CPU_RES_NOT_IN_MD 0x04
__u32 stat;
#define DR_CPU_STAT_NOT_PRESENT 0x00
#define DR_CPU_STAT_UNCONFIGURED 0x01
#define DR_CPU_STAT_CONFIGURED 0x02
__u32 str_off;
};
static void __dr_cpu_send_error(struct ds_info *dp,
struct ds_cap_state *cp,
struct ds_data *data)
{
struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
struct {
struct ds_data data;
struct dr_cpu_tag tag;
} pkt;
int msg_len;
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.handle = cp->handle;
pkt.tag.req_num = tag->req_num;
pkt.tag.type = DR_CPU_ERROR;
pkt.tag.num_records = 0;
msg_len = (sizeof(struct ds_data) +
sizeof(struct dr_cpu_tag));
pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
__ds_send(dp->lp, &pkt, msg_len);
}
static void dr_cpu_send_error(struct ds_info *dp,
struct ds_cap_state *cp,
struct ds_data *data)
{
unsigned long flags;
spin_lock_irqsave(&ds_lock, flags);
__dr_cpu_send_error(dp, cp, data);
spin_unlock_irqrestore(&ds_lock, flags);
}
#define CPU_SENTINEL 0xffffffff
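/* Overwrite duplicate cpu ids in the request list with a sentinel
* so each cpu is acted upon at most once.
*/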
static void purge_dups(u32 *list, u32 num_ents)
{
unsigned int i;
for (i = 0; i < num_ents; i++) {
u32 cpu = list[i];
unsigned int j;
if (cpu == CPU_SENTINEL)
continue;
for (j = i + 1; j < num_ents; j++) {
if (list[j] == cpu)
list[j] = CPU_SENTINEL;
}
}
}
static int dr_cpu_size_response(int ncpus)
{
return (sizeof(struct ds_data) +
sizeof(struct dr_cpu_tag) +
(sizeof(struct dr_cpu_resp_entry) * ncpus));
}
static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
u64 handle, int resp_len, int ncpus,
cpumask_t *mask, u32 default_stat)
{
struct dr_cpu_resp_entry *ent;
struct dr_cpu_tag *tag;
int i, cpu;
tag = (struct dr_cpu_tag *) (resp + 1);
ent = (struct dr_cpu_resp_entry *) (tag + 1);
resp->tag.type = DS_DATA;
resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
resp->handle = handle;
tag->req_num = req_num;
tag->type = DR_CPU_OK;
tag->num_records = ncpus;
i = 0;
for_each_cpu(cpu, mask) {
ent[i].cpu = cpu;
ent[i].result = DR_CPU_RES_OK;
ent[i].stat = default_stat;
i++;
}
BUG_ON(i != ncpus);
}
static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
u32 res, u32 stat)
{
struct dr_cpu_resp_entry *ent;
struct dr_cpu_tag *tag;
int i;
tag = (struct dr_cpu_tag *) (resp + 1);
ent = (struct dr_cpu_resp_entry *) (tag + 1);
for (i = 0; i < ncpus; i++) {
if (ent[i].cpu != cpu)
continue;
ent[i].result = res;
ent[i].stat = stat;
break;
}
}
static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp,
u64 req_num, cpumask_t *mask)
{
struct ds_data *resp;
int resp_len, ncpus, cpu;
unsigned long flags;
ncpus = cpumask_weight(mask);
resp_len = dr_cpu_size_response(ncpus);
resp = kzalloc(resp_len, GFP_KERNEL);
if (!resp)
return -ENOMEM;
dr_cpu_init_response(resp, req_num, cp->handle,
resp_len, ncpus, mask,
DR_CPU_STAT_CONFIGURED);
mdesc_populate_present_mask(mask);
mdesc_fill_in_cpu_data(mask);
for_each_cpu(cpu, mask) {
int err;
printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
dp->id, cpu);
err = add_cpu(cpu);
if (err) {
__u32 res = DR_CPU_RES_FAILURE;
__u32 stat = DR_CPU_STAT_UNCONFIGURED;
if (!cpu_present(cpu)) {
/* CPU not present in MD */
res = DR_CPU_RES_NOT_IN_MD;
stat = DR_CPU_STAT_NOT_PRESENT;
} else if (err == -ENODEV) {
/* CPU did not call in successfully */
res = DR_CPU_RES_CPU_NOT_RESPONDING;
}
printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n",
dp->id, err);
dr_cpu_mark(resp, cpu, ncpus, res, stat);
}
}
spin_lock_irqsave(&ds_lock, flags);
__ds_send(dp->lp, resp, resp_len);
spin_unlock_irqrestore(&ds_lock, flags);
kfree(resp);
/* Redistribute IRQs, taking into account the new cpus. */
fixup_irqs();
return 0;
}
static int dr_cpu_unconfigure(struct ds_info *dp,
struct ds_cap_state *cp,
u64 req_num,
cpumask_t *mask)
{
struct ds_data *resp;
int resp_len, ncpus, cpu;
unsigned long flags;
ncpus = cpumask_weight(mask);
resp_len = dr_cpu_size_response(ncpus);
resp = kzalloc(resp_len, GFP_KERNEL);
if (!resp)
return -ENOMEM;
dr_cpu_init_response(resp, req_num, cp->handle,
resp_len, ncpus, mask,
DR_CPU_STAT_UNCONFIGURED);
for_each_cpu(cpu, mask) {
int err;
printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
dp->id, cpu);
err = remove_cpu(cpu);
if (err)
dr_cpu_mark(resp, cpu, ncpus,
DR_CPU_RES_FAILURE,
DR_CPU_STAT_CONFIGURED);
}
spin_lock_irqsave(&ds_lock, flags);
__ds_send(dp->lp, resp, resp_len);
spin_unlock_irqrestore(&ds_lock, flags);
kfree(resp);
return 0;
}
static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf,
int len)
{
struct ds_data *data = buf;
struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
u32 *cpu_list = (u32 *) (tag + 1);
u64 req_num = tag->req_num;
cpumask_t mask;
unsigned int i;
int err;
switch (tag->type) {
case DR_CPU_CONFIGURE:
case DR_CPU_UNCONFIGURE:
case DR_CPU_FORCE_UNCONFIGURE:
break;
default:
dr_cpu_send_error(dp, cp, data);
return;
}
purge_dups(cpu_list, tag->num_records);
cpumask_clear(&mask);
for (i = 0; i < tag->num_records; i++) {
if (cpu_list[i] == CPU_SENTINEL)
continue;
if (cpu_list[i] < nr_cpu_ids)
cpumask_set_cpu(cpu_list[i], &mask);
}
if (tag->type == DR_CPU_CONFIGURE)
err = dr_cpu_configure(dp, cp, req_num, &mask);
else
err = dr_cpu_unconfigure(dp, cp, req_num, &mask);
if (err)
dr_cpu_send_error(dp, cp, data);
}
#endif /* CONFIG_HOTPLUG_CPU */
struct ds_pri_msg {
__u64 req_num;
__u64 type;
#define DS_PRI_REQUEST 0x00
#define DS_PRI_DATA 0x01
#define DS_PRI_UPDATE 0x02
};
static void ds_pri_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ds_data *dpkt = buf;
struct ds_pri_msg *rp;
rp = (struct ds_pri_msg *) (dpkt + 1);
printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
dp->id, rp->req_num, rp->type, len);
}
struct ds_var_hdr {
__u32 type;
#define DS_VAR_SET_REQ 0x00
#define DS_VAR_DELETE_REQ 0x01
#define DS_VAR_SET_RESP 0x02
#define DS_VAR_DELETE_RESP 0x03
};
struct ds_var_set_msg {
struct ds_var_hdr hdr;
char name_and_value[];
};
struct ds_var_delete_msg {
struct ds_var_hdr hdr;
char name[];
};
struct ds_var_resp {
struct ds_var_hdr hdr;
__u32 result;
#define DS_VAR_SUCCESS 0x00
#define DS_VAR_NO_SPACE 0x01
#define DS_VAR_INVALID_VAR 0x02
#define DS_VAR_INVALID_VAL 0x03
#define DS_VAR_NOT_PRESENT 0x04
};
static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;
static void ds_var_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ds_data *dpkt = buf;
struct ds_var_resp *rp;
rp = (struct ds_var_resp *) (dpkt + 1);
if (rp->hdr.type != DS_VAR_SET_RESP &&
rp->hdr.type != DS_VAR_DELETE_RESP)
return;
ds_var_response = rp->result;
wmb();
ds_var_doorbell = 1;
}
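/* Set an LDOM variable through the "var-config" service (falling
* back to "var-config-backup"), then poll briefly for the response
* doorbell set by ds_var_data().
*/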
void ldom_set_var(const char *var, const char *value)
{
struct ds_cap_state *cp;
struct ds_info *dp;
unsigned long flags;
spin_lock_irqsave(&ds_lock, flags);
cp = NULL;
for (dp = ds_info_list; dp; dp = dp->next) {
struct ds_cap_state *tmp;
tmp = find_cap_by_string(dp, "var-config");
if (tmp && tmp->state == CAP_STATE_REGISTERED) {
cp = tmp;
break;
}
}
if (!cp) {
for (dp = ds_info_list; dp; dp = dp->next) {
struct ds_cap_state *tmp;
tmp = find_cap_by_string(dp, "var-config-backup");
if (tmp && tmp->state == CAP_STATE_REGISTERED) {
cp = tmp;
break;
}
}
}
spin_unlock_irqrestore(&ds_lock, flags);
if (cp) {
union {
struct {
struct ds_data data;
struct ds_var_set_msg msg;
} header;
char all[512];
} pkt;
char *base, *p;
int msg_len, loops;
if (strlen(var) + strlen(value) + 2 >
sizeof(pkt) - sizeof(pkt.header)) {
printk(KERN_ERR PFX
"contents length: %zu, which more than max: %lu,"
"so could not set (%s) variable to (%s).\n",
strlen(var) + strlen(value) + 2,
sizeof(pkt) - sizeof(pkt.header), var, value);
return;
}
memset(&pkt, 0, sizeof(pkt));
pkt.header.data.tag.type = DS_DATA;
pkt.header.data.handle = cp->handle;
pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
base = p = &pkt.header.msg.name_and_value[0];
strcpy(p, var);
p += strlen(var) + 1;
strcpy(p, value);
p += strlen(value) + 1;
msg_len = (sizeof(struct ds_data) +
sizeof(struct ds_var_set_msg) +
(p - base));
msg_len = (msg_len + 3) & ~3;
pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
mutex_lock(&ds_var_mutex);
spin_lock_irqsave(&ds_lock, flags);
ds_var_doorbell = 0;
ds_var_response = -1;
__ds_send(dp->lp, &pkt, msg_len);
spin_unlock_irqrestore(&ds_lock, flags);
loops = 1000;
while (ds_var_doorbell == 0) {
if (loops-- < 0)
break;
barrier();
udelay(100);
}
mutex_unlock(&ds_var_mutex);
if (ds_var_doorbell == 0 ||
ds_var_response != DS_VAR_SUCCESS)
printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
"failed, response(%d).\n",
dp->id, var, value,
ds_var_response);
} else {
printk(KERN_ERR PFX "var-config not registered so "
"could not set (%s) variable to (%s).\n",
var, value);
}
}
static char full_boot_str[256] __attribute__((aligned(32)));
static int reboot_data_supported;
void ldom_reboot(const char *boot_command)
{
/* Don't bother with any of this if the boot_command
* is empty.
*/
if (boot_command && strlen(boot_command)) {
unsigned long len;
snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
boot_command);
len = strlen(full_boot_str);
if (reboot_data_supported) {
unsigned long ra = kimage_addr_to_ra(full_boot_str);
unsigned long hv_ret;
hv_ret = sun4v_reboot_data_set(ra, len);
if (hv_ret != HV_EOK)
pr_err("SUN4V: Unable to set reboot data "
"hv_ret=%lu\n", hv_ret);
} else {
ldom_set_var("reboot-command", full_boot_str);
}
}
sun4v_mach_sir();
}
void ldom_power_off(void)
{
sun4v_mach_exit(0);
}
static void ds_conn_reset(struct ds_info *dp)
{
printk(KERN_ERR "ds-%llu: ds_conn_reset() from %ps\n",
dp->id, __builtin_return_address(0));
}
static int register_services(struct ds_info *dp)
{
struct ldc_channel *lp = dp->lp;
int i;
for (i = 0; i < dp->num_ds_states; i++) {
struct {
struct ds_reg_req req;
u8 id_buf[256];
} pbuf;
struct ds_cap_state *cp = &dp->ds_states[i];
int err, msg_len;
u64 new_count;
if (cp->state == CAP_STATE_REGISTERED)
continue;
new_count = sched_clock() & 0xffffffff;
cp->handle = ((u64) i << 32) | new_count;
msg_len = (sizeof(struct ds_reg_req) +
strlen(cp->service_id));
memset(&pbuf, 0, sizeof(pbuf));
pbuf.req.tag.type = DS_REG_REQ;
pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
pbuf.req.handle = cp->handle;
pbuf.req.major = 1;
pbuf.req.minor = 0;
strcpy(pbuf.id_buf, cp->service_id);
err = __ds_send(lp, &pbuf, msg_len);
if (err > 0)
cp->state = CAP_STATE_REG_SENT;
}
return 0;
}
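/* Drive the handshake state machine: an INIT ACK triggers service
* registration, after which REG ACK/NACK messages update each
* capability's state.
*/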
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{
if (dp->hs_state == DS_HS_START) {
if (pkt->type != DS_INIT_ACK)
goto conn_reset;
dp->hs_state = DS_HS_DONE;
return register_services(dp);
}
if (dp->hs_state != DS_HS_DONE)
goto conn_reset;
if (pkt->type == DS_REG_ACK) {
struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
struct ds_cap_state *cp = find_cap(dp, ap->handle);
if (!cp) {
printk(KERN_ERR "ds-%llu: REG ACK for unknown "
"handle %llx\n", dp->id, ap->handle);
return 0;
}
printk(KERN_INFO "ds-%llu: Registered %s service.\n",
dp->id, cp->service_id);
cp->state = CAP_STATE_REGISTERED;
} else if (pkt->type == DS_REG_NACK) {
struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
struct ds_cap_state *cp = find_cap(dp, np->handle);
if (!cp) {
printk(KERN_ERR "ds-%llu: REG NACK for "
"unknown handle %llx\n",
dp->id, np->handle);
return 0;
}
cp->state = CAP_STATE_UNKNOWN;
}
return 0;
conn_reset:
ds_conn_reset(dp);
return -ECONNRESET;
}
static void __send_ds_nack(struct ds_info *dp, u64 handle)
{
struct ds_data_nack nack = {
.tag = {
.type = DS_NACK,
.len = (sizeof(struct ds_data_nack) -
sizeof(struct ds_msg_tag)),
},
.handle = handle,
.result = DS_INV_HDL,
};
__ds_send(dp->lp, &nack, sizeof(nack));
}
static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);
struct ds_queue_entry {
struct list_head list;
struct ds_info *dp;
int req_len;
int __pad;
u64 req[];
};
static void process_ds_work(void)
{
struct ds_queue_entry *qp, *tmp;
unsigned long flags;
LIST_HEAD(todo);
spin_lock_irqsave(&ds_lock, flags);
list_splice_init(&ds_work_list, &todo);
spin_unlock_irqrestore(&ds_lock, flags);
list_for_each_entry_safe(qp, tmp, &todo, list) {
struct ds_data *dpkt = (struct ds_data *) qp->req;
struct ds_info *dp = qp->dp;
struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
int req_len = qp->req_len;
if (!cp) {
printk(KERN_ERR "ds-%llu: Data for unknown "
"handle %llu\n",
dp->id, dpkt->handle);
spin_lock_irqsave(&ds_lock, flags);
__send_ds_nack(dp, dpkt->handle);
spin_unlock_irqrestore(&ds_lock, flags);
} else {
cp->data(dp, cp, dpkt, req_len);
}
list_del(&qp->list);
kfree(qp);
}
}
static int ds_thread(void *__unused)
{
DEFINE_WAIT(wait);
while (1) {
prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
if (list_empty(&ds_work_list))
schedule();
finish_wait(&ds_wait, &wait);
if (kthread_should_stop())
break;
process_ds_work();
}
return 0;
}
static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
struct ds_data *dpkt = (struct ds_data *) pkt;
struct ds_queue_entry *qp;
qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
if (!qp) {
__send_ds_nack(dp, dpkt->handle);
} else {
qp->dp = dp;
memcpy(&qp->req, pkt, len);
list_add_tail(&qp->list, &ds_work_list);
wake_up(&ds_wait);
}
return 0;
}
static void ds_up(struct ds_info *dp)
{
struct ldc_channel *lp = dp->lp;
struct ds_ver_req req;
int err;
req.tag.type = DS_INIT_REQ;
req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
req.ver.major = 1;
req.ver.minor = 0;
err = __ds_send(lp, &req, sizeof(req));
if (err > 0)
dp->hs_state = DS_HS_START;
}
static void ds_reset(struct ds_info *dp)
{
int i;
dp->hs_state = 0;
for (i = 0; i < dp->num_ds_states; i++) {
struct ds_cap_state *cp = &dp->ds_states[i];
cp->state = CAP_STATE_UNKNOWN;
}
}
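/* LDC event callback: handle link up/reset, then drain incoming
* packets, routing handshake messages inline and queueing data
* packets for the worker thread.
*/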
static void ds_event(void *arg, int event)
{
struct ds_info *dp = arg;
struct ldc_channel *lp = dp->lp;
unsigned long flags;
int err;
spin_lock_irqsave(&ds_lock, flags);
if (event == LDC_EVENT_UP) {
ds_up(dp);
spin_unlock_irqrestore(&ds_lock, flags);
return;
}
if (event == LDC_EVENT_RESET) {
ds_reset(dp);
spin_unlock_irqrestore(&ds_lock, flags);
return;
}
if (event != LDC_EVENT_DATA_READY) {
printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
dp->id, event);
spin_unlock_irqrestore(&ds_lock, flags);
return;
}
err = 0;
while (1) {
struct ds_msg_tag *tag;
err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
ds_conn_reset(dp);
break;
}
if (err == 0)
break;
tag = dp->rcv_buf;
err = ldc_read(lp, tag + 1, tag->len);
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
ds_conn_reset(dp);
break;
}
if (err < tag->len)
break;
if (tag->type < DS_DATA)
err = ds_handshake(dp, dp->rcv_buf);
else
err = ds_data(dp, dp->rcv_buf,
sizeof(*tag) + err);
if (err == -ECONNRESET)
break;
}
spin_unlock_irqrestore(&ds_lock, flags);
}
static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
static int ds_version_printed;
struct ldc_channel_config ds_cfg = {
.event = ds_event,
.mtu = 4096,
.mode = LDC_MODE_STREAM,
};
struct mdesc_handle *hp;
struct ldc_channel *lp;
struct ds_info *dp;
const u64 *val;
int err, i;
if (ds_version_printed++ == 0)
printk(KERN_INFO "%s", version);
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
err = -ENOMEM;
if (!dp)
goto out_err;
hp = mdesc_grab();
val = mdesc_get_property(hp, vdev->mp, "id", NULL);
if (val)
dp->id = *val;
mdesc_release(hp);
dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
if (!dp->rcv_buf)
goto out_free_dp;
dp->rcv_buf_len = 4096;
dp->ds_states = kmemdup(ds_states_template,
sizeof(ds_states_template), GFP_KERNEL);
if (!dp->ds_states)
goto out_free_rcv_buf;
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
for (i = 0; i < dp->num_ds_states; i++)
dp->ds_states[i].handle = ((u64)i << 32);
ds_cfg.tx_irq = vdev->tx_irq;
ds_cfg.rx_irq = vdev->rx_irq;
lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
if (IS_ERR(lp)) {
err = PTR_ERR(lp);
goto out_free_ds_states;
}
dp->lp = lp;
err = ldc_bind(lp);
if (err)
goto out_free_ldc;
spin_lock_irq(&ds_lock);
dp->next = ds_info_list;
ds_info_list = dp;
spin_unlock_irq(&ds_lock);
return err;
out_free_ldc:
ldc_free(dp->lp);
out_free_ds_states:
kfree(dp->ds_states);
out_free_rcv_buf:
kfree(dp->rcv_buf);
out_free_dp:
kfree(dp);
out_err:
return err;
}
static const struct vio_device_id ds_match[] = {
{
.type = "domain-services-port",
},
{},
};
static struct vio_driver ds_driver = {
.id_table = ds_match,
.probe = ds_probe,
.name = "ds",
};
static int __init ds_init(void)
{
unsigned long hv_ret, major, minor;
if (tlb_type == hypervisor) {
hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
if (hv_ret == HV_EOK) {
pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
major, minor);
reboot_data_supported = 1;
}
}
kthread_run(ds_thread, NULL, "kldomd");
return vio_register_driver(&ds_driver);
}
fs_initcall(ds_init);
| linux-master | arch/sparc/kernel/ds.c |
// SPDX-License-Identifier: GPL-2.0
/* pci_msi.c: Sparc64 MSI support common layer.
*
* Copyright (C) 2007 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include "pci_impl.h"
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
struct sparc64_msiq_cookie *msiq_cookie = cookie;
struct pci_pbm_info *pbm = msiq_cookie->pbm;
unsigned long msiqid = msiq_cookie->msiqid;
const struct sparc64_msiq_ops *ops;
unsigned long orig_head, head;
int err;
ops = pbm->msi_ops;
err = ops->get_head(pbm, msiqid, &head);
if (unlikely(err < 0))
goto err_get_head;
orig_head = head;
for (;;) {
unsigned long msi;
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0)) {
unsigned int irq;
irq = pbm->msi_irq_table[msi - pbm->msi_first];
generic_handle_irq(irq);
}
if (unlikely(err < 0))
goto err_dequeue;
if (err == 0)
break;
}
if (likely(head != orig_head)) {
err = ops->set_head(pbm, msiqid, head);
if (unlikely(err < 0))
goto err_set_head;
}
return IRQ_HANDLED;
err_get_head:
printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
msiqid, err);
goto err_out;
err_dequeue:
printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
"gives error %d\n",
head, msiqid, err);
goto err_out;
err_set_head:
printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
"gives error %d\n",
head, msiqid, err);
goto err_out;
err_out:
return IRQ_NONE;
}
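/* Assign MSIs to event queues with a simple global rotor, spreading
 * them round-robin across the PBM's queues.
 */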
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
static DEFINE_SPINLOCK(rotor_lock);
unsigned long flags;
u32 ret, rotor;
spin_lock_irqsave(&rotor_lock, flags);
rotor = pbm->msiq_rotor;
ret = pbm->msiq_first + rotor;
if (++rotor >= pbm->msiq_num)
rotor = 0;
pbm->msiq_rotor = rotor;
spin_unlock_irqrestore(&rotor_lock, flags);
return ret;
}
static int alloc_msi(struct pci_pbm_info *pbm)
{
int i;
for (i = 0; i < pbm->msi_num; i++) {
if (!test_and_set_bit(i, pbm->msi_bitmap))
return i + pbm->msi_first;
}
return -ENOENT;
}
static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
msi_num -= pbm->msi_first;
clear_bit(msi_num, pbm->msi_bitmap);
}
static struct irq_chip msi_irq = {
.name = "PCI-MSI",
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
.irq_enable = pci_msi_unmask_irq,
.irq_disable = pci_msi_mask_irq,
/* XXX affinity XXX */
};
static int sparc64_setup_msi_irq(unsigned int *irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;
struct msi_msg msg;
int msi, err;
u32 msiqid;
*irq_p = irq_alloc(0, 0);
err = -ENOMEM;
if (!*irq_p)
goto out_err;
irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
"MSI");
err = alloc_msi(pbm);
if (unlikely(err < 0))
goto out_irq_free;
msi = err;
msiqid = pick_msiq(pbm);
err = ops->msi_setup(pbm, msiqid, msi,
(entry->pci.msi_attrib.is_64 ? 1 : 0));
if (err)
goto out_msi_free;
pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
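/* Program the device with an address inside the PBM's 32-bit or
 * 64-bit MSI address range; the data payload is the raw MSI number,
 * which sparc64_msiq_interrupt() maps back to a Linux IRQ.
 */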
if (entry->pci.msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
msg.address_lo = pbm->msi64_start & 0xffffffff;
} else {
msg.address_hi = 0;
msg.address_lo = pbm->msi32_start;
}
msg.data = msi;
irq_set_msi_desc(*irq_p, entry);
pci_write_msi_msg(*irq_p, &msg);
return 0;
out_msi_free:
free_msi(pbm, msi);
out_irq_free:
irq_set_chip(*irq_p, NULL);
irq_free(*irq_p);
*irq_p = 0;
out_err:
return err;
}
static void sparc64_teardown_msi_irq(unsigned int irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;
unsigned int msi_num;
int i, err;
for (i = 0; i < pbm->msi_num; i++) {
if (pbm->msi_irq_table[i] == irq)
break;
}
if (i >= pbm->msi_num) {
pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
irq);
return;
}
msi_num = pbm->msi_first + i;
pbm->msi_irq_table[i] = ~0U;
err = ops->msi_teardown(pbm, msi_num);
if (err) {
pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "
"irq %u, gives error %d\n", pbm->name, msi_num, irq,
err);
return;
}
free_msi(pbm, msi_num);
irq_set_chip(irq, NULL);
irq_free(irq);
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
unsigned long size, bits_per_ulong;
bits_per_ulong = sizeof(unsigned long) * 8;
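/* Round the MSI count up to a multiple of the bits in an unsigned
 * long, then convert bits to bytes -- the same result as
 * BITS_TO_LONGS(pbm->msi_num) * sizeof(unsigned long).
 */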
size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
size /= 8;
BUG_ON(size % sizeof(unsigned long));
pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
if (!pbm->msi_bitmap)
return -ENOMEM;
return 0;
}
static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
kfree(pbm->msi_bitmap);
pbm->msi_bitmap = NULL;
}
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
int size, i;
size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
if (!pbm->msiq_irq_cookies)
return -ENOMEM;
for (i = 0; i < pbm->msiq_num; i++) {
struct sparc64_msiq_cookie *p;
p = &pbm->msiq_irq_cookies[i];
p->pbm = pbm;
p->msiqid = pbm->msiq_first + i;
}
size = pbm->msi_num * sizeof(unsigned int);
pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
if (!pbm->msi_irq_table) {
kfree(pbm->msiq_irq_cookies);
pbm->msiq_irq_cookies = NULL;
return -ENOMEM;
}
return 0;
}
static void msi_table_free(struct pci_pbm_info *pbm)
{
kfree(pbm->msiq_irq_cookies);
pbm->msiq_irq_cookies = NULL;
kfree(pbm->msi_irq_table);
pbm->msi_irq_table = NULL;
}
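/* Each event queue gets its own devino-backed IRQ; when the controller
 * has a known NUMA node, bind the queue interrupt to CPUs on that node.
 */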
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops,
unsigned long msiqid,
unsigned long devino)
{
int irq = ops->msiq_build_irq(pbm, msiqid, devino);
int err, nid;
if (irq < 0)
return irq;
nid = pbm->numa_node;
if (nid != -1) {
cpumask_t numa_mask;
cpumask_copy(&numa_mask, cpumask_of_node(nid));
irq_set_affinity(irq, &numa_mask);
}
err = request_irq(irq, sparc64_msiq_interrupt, 0,
"MSIQ",
&pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
if (err)
return err;
return 0;
}
static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops)
{
int i;
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long msiqid = i + pbm->msiq_first;
unsigned long devino = i + pbm->msiq_first_devino;
int err;
err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
if (err)
return err;
}
return 0;
}
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops)
{
const u32 *val;
int len;
val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_num = *val;
if (pbm->msiq_num) {
const struct msiq_prop {
u32 first_msiq;
u32 num_msiq;
u32 first_devino;
} *mqp;
const struct msi_range_prop {
u32 first_msi;
u32 num_msi;
} *mrng;
const struct addr_range_prop {
u32 msi32_high;
u32 msi32_low;
u32 msi32_len;
u32 msi64_high;
u32 msi64_low;
u32 msi64_len;
} *arng;
val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_ent_count = *val;
mqp = of_get_property(pbm->op->dev.of_node,
"msi-eq-to-devino", &len);
if (!mqp)
mqp = of_get_property(pbm->op->dev.of_node,
"msi-eq-devino", &len);
if (!mqp || len != sizeof(struct msiq_prop))
goto no_msi;
pbm->msiq_first = mqp->first_msiq;
pbm->msiq_first_devino = mqp->first_devino;
val = of_get_property(pbm->op->dev.of_node, "#msi", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_num = *val;
mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len);
if (!mrng || len != sizeof(struct msi_range_prop))
goto no_msi;
pbm->msi_first = mrng->first_msi;
val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_data_mask = *val;
val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len);
if (!val || len != 4)
goto no_msi;
pbm->msix_data_width = *val;
arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges",
&len);
if (!arng || len != sizeof(struct addr_range_prop))
goto no_msi;
pbm->msi32_start = ((u64)arng->msi32_high << 32) |
(u64) arng->msi32_low;
pbm->msi64_start = ((u64)arng->msi64_high << 32) |
(u64) arng->msi64_low;
pbm->msi32_len = arng->msi32_len;
pbm->msi64_len = arng->msi64_len;
if (msi_bitmap_alloc(pbm))
goto no_msi;
if (msi_table_alloc(pbm)) {
msi_bitmap_free(pbm);
goto no_msi;
}
if (ops->msiq_alloc(pbm)) {
msi_table_free(pbm);
msi_bitmap_free(pbm);
goto no_msi;
}
if (sparc64_bringup_msi_queues(pbm, ops)) {
ops->msiq_free(pbm);
msi_table_free(pbm);
msi_bitmap_free(pbm);
goto no_msi;
}
printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
"devino[0x%x]\n",
pbm->name,
pbm->msiq_first, pbm->msiq_num,
pbm->msiq_ent_count,
pbm->msiq_first_devino);
printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
"width[%u]\n",
pbm->name,
pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
pbm->msix_data_width);
printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
"addr64[0x%llx:0x%x]\n",
pbm->name,
pbm->msi32_start, pbm->msi32_len,
pbm->msi64_start, pbm->msi64_len);
printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
pbm->name,
__pa(pbm->msi_queues));
pbm->msi_ops = ops;
pbm->setup_msi_irq = sparc64_setup_msi_irq;
pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
}
return;
no_msi:
pbm->msiq_num = 0;
printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
| linux-master | arch/sparc/kernel/pci_msi.c |
// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995, 2008 David S. Miller ([email protected])
* Copyright (C) 1996 Eddie C. Dost ([email protected])
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/elfcore.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/auxio.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/psr.h>
#include <asm/elf.h>
#include <asm/prom.h>
#include <asm/unistd.h>
#include <asm/setup.h>
#include "kernel.h"
/*
* Power management idle function
* Set in pm platform drivers (apc.c and pmc.c)
*/
void (*sparc_idle)(void);
/*
* Power-off handler instantiation for pm.h compliance
* This is done via auxio, but could be used as a fallback
* handler when auxio is not present-- unused for now...
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
/*
* sysctl - toggle power-off restriction for serial console
* systems in machine_power_off()
*/
int scons_pwroff = 1;
extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];
/* Idle loop support. */
void arch_cpu_idle(void)
{
if (sparc_idle)
(*sparc_idle)();
}
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
void machine_halt(void)
{
local_irq_enable();
mdelay(8);
local_irq_disable();
prom_halt();
panic("Halt failed!");
}
void machine_restart(char * cmd)
{
char *p;
local_irq_enable();
mdelay(8);
local_irq_disable();
p = strchr (reboot_command, '\n');
if (p) *p = 0;
if (cmd)
prom_reboot(cmd);
if (*reboot_command)
prom_reboot(reboot_command);
prom_feval ("reset");
panic("Reboot failed!");
}
void machine_power_off(void)
{
if (auxio_power_register &&
(!of_node_is_type(of_console_device, "serial") || scons_pwroff)) {
u8 power_register = sbus_readb(auxio_power_register);
power_register |= AUXIO_POWER_OFF;
sbus_writeb(power_register, auxio_power_register);
}
machine_halt();
}
void show_regs(struct pt_regs *r)
{
struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];
show_regs_print_info(KERN_DEFAULT);
printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
r->psr, r->pc, r->npc, r->y, print_tainted());
printk("PC: <%pS>\n", (void *) r->pc);
printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}
/*
* show_stack() is an external API which we do not use ourselves.
* The oops is printed in die_if_kernel.
*/
void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long pc, fp;
unsigned long task_base;
struct reg_window32 *rw;
int count = 0;
if (!tsk)
tsk = current;
if (tsk == current && !_ksp)
__asm__ __volatile__("mov %%fp, %0" : "=r" (_ksp));
task_base = (unsigned long) task_stack_page(tsk);
fp = (unsigned long) _ksp;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct thread_info)) ||
fp >= (task_base + (PAGE_SIZE << 1)))
break;
rw = (struct reg_window32 *) fp;
pc = rw->ins[7];
printk("%s[%08lx : ", loglvl, pc);
printk("%s%pS ] ", loglvl, (void *) pc);
fp = rw->ins[6];
} while (++count < 16);
printk("%s\n", loglvl);
}
/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
#ifndef CONFIG_SMP
if (last_task_used_math == tsk) {
#else
if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) {
#endif
/* Keep process from leaving FPU in a bogon state. */
put_psr(get_psr() | PSR_EF);
fpsave(&tsk->thread.float_regs[0], &tsk->thread.fsr,
&tsk->thread.fpqueue[0], &tsk->thread.fpqdepth);
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#else
clear_ti_thread_flag(task_thread_info(tsk), TIF_USEDFPU);
#endif
}
}
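/* As in exit_thread() above, the UP kernel tracks FPU ownership with
 * last_task_used_math while SMP uses the per-task TIF_USEDFPU flag;
 * flush_thread() and copy_thread() below follow the same convention.
 */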
void flush_thread(void)
{
current_thread_info()->w_saved = 0;
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
if (test_thread_flag(TIF_USEDFPU)) {
#endif
/* Clean the fpu. */
put_psr(get_psr() | PSR_EF);
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
&current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
last_task_used_math = NULL;
#else
clear_thread_flag(TIF_USEDFPU);
#endif
}
}
static inline struct sparc_stackf __user *
clone_stackframe(struct sparc_stackf __user *dst,
struct sparc_stackf __user *src)
{
unsigned long size, fp;
struct sparc_stackf *tmp;
struct sparc_stackf __user *sp;
if (get_user(tmp, &src->fp))
return NULL;
fp = (unsigned long) tmp;
size = (fp - ((unsigned long) src));
fp = (unsigned long) dst;
sp = (struct sparc_stackf __user *)(fp - size);
/* do_fork() grabs the parent semaphore, we must release it
* temporarily so we can build the child clone stack frame
* without deadlocking.
*/
if (__copy_user(sp, src, size))
sp = NULL;
else if (put_user(fp, &sp->fp))
sp = NULL;
return sp;
}
/* Copy a Sparc thread. The fork() return value conventions
* under SunOS are nothing short of bletcherous:
* Parent --> %o0 == child's pid, %o1 == 0
* Child --> %o0 == parent's pid, %o1 == 1
*
* NOTE: We have a separate fork kpsr/kwim because
* the parent could change these values between
* sys_fork invocation and when we reach here
* if the parent should sleep while trying to
* allocate the task_struct and kernel stack in
* do_fork().
* XXX See comment above sys_vfork in sparc64. todo.
*/
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long sp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
char *new_stack;
#ifndef CONFIG_SMP
if(last_task_used_math == current) {
#else
if (test_thread_flag(TIF_USEDFPU)) {
#endif
put_psr(get_psr() | PSR_EF);
fpsave(&p->thread.float_regs[0], &p->thread.fsr,
&p->thread.fpqueue[0], &p->thread.fpqdepth);
}
/*
* p->thread_info new_stack childregs stack bottom
* ! ! ! !
* V V (stk.fr.) V (pt_regs) V
* +----- - - - - - ------+===========+=============+
*/
new_stack = task_stack_page(p) + THREAD_SIZE;
new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);
/*
* A new process must start with interrupts disabled, see schedule_tail()
* and finish_task_switch(). (If we do not do it and if a timer interrupt
* hits before we unlock and attempts to take the rq->lock, we deadlock.)
*
* Thus, kpsr |= PSR_PIL.
*/
ti->ksp = (unsigned long) new_stack;
p->thread.kregs = childregs;
if (unlikely(args->fn)) {
extern int nwindows;
unsigned long psr;
memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
childregs->u_regs[UREG_G1] = (unsigned long) args->fn;
childregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
psr = childregs->psr = get_psr();
ti->kpsr = psr | PSR_PIL;
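/* Mark the window adjacent to the current one invalid in the saved
 * WIM: the new thread starts with a single live window, and window
 * overflow/underflow traps fire at that boundary.
 */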
ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows);
return 0;
}
memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
childregs->u_regs[UREG_FP] = sp;
ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
ti->kwim = current->thread.fork_kwim;
if (sp != regs->u_regs[UREG_FP]) {
struct sparc_stackf __user *childstack;
struct sparc_stackf __user *parentstack;
/*
* This is a clone() call with supplied user stack.
* Set some valid stack frames to give to the child.
*/
childstack = (struct sparc_stackf __user *)
(sp & ~0xfUL);
parentstack = (struct sparc_stackf __user *)
regs->u_regs[UREG_FP];
#if 0
printk("clone: parent stack:\n");
show_stackframe(parentstack);
#endif
childstack = clone_stackframe(childstack, parentstack);
if (!childstack)
return -EFAULT;
#if 0
printk("clone: child stack:\n");
show_stackframe(childstack);
#endif
childregs->u_regs[UREG_FP] = (unsigned long)childstack;
}
#ifdef CONFIG_SMP
/* FPU must be disabled on SMP. */
childregs->psr &= ~PSR_EF;
clear_tsk_thread_flag(p, TIF_USEDFPU);
#endif
/* Set the return value for the child. */
childregs->u_regs[UREG_I0] = current->pid;
childregs->u_regs[UREG_I1] = 1;
/* Set the return value for the parent. */
regs->u_regs[UREG_I1] = 0;
if (clone_flags & CLONE_SETTLS)
childregs->u_regs[UREG_G7] = tls;
return 0;
}
unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
unsigned long task_base = (unsigned long) task;
unsigned long ret = 0;
struct reg_window32 *rw;
int count = 0;
fp = task_thread_info(task)->ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct thread_info)) ||
fp >= (task_base + (2 * PAGE_SIZE)))
break;
rw = (struct reg_window32 *) fp;
pc = rw->ins[7];
if (!in_sched_functions(pc)) {
ret = pc;
goto out;
}
fp = rw->ins[6] + bias;
} while (++count < 16);
out:
return ret;
}
| linux-master | arch/sparc/kernel/process_32.c |
// SPDX-License-Identifier: GPL-2.0-only
/* chmc.c: Driver for UltraSPARC-III memory controller.
*
* Copyright (C) 2001, 2007, 2008 David S. Miller ([email protected])
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/spitfire.h>
#include <asm/chmctrl.h>
#include <asm/cpudata.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/head.h>
#include <asm/io.h>
#include <asm/memctrl.h>
#define DRV_MODULE_NAME "chmc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.2"
MODULE_AUTHOR("David S. Miller ([email protected])");
MODULE_DESCRIPTION("UltraSPARC-III memory controller driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int mc_type;
#define MC_TYPE_SAFARI 1
#define MC_TYPE_JBUS 2
static dimm_printer_t us3mc_dimm_printer;
#define CHMCTRL_NDGRPS 2
#define CHMCTRL_NDIMMS 4
#define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)
/* OBP memory-layout property format. */
struct chmc_obp_map {
unsigned char dimm_map[144];
unsigned char pin_map[576];
};
#define DIMM_LABEL_SZ 8
struct chmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct chmc_obp_map map[2];
};
#define CHMCTRL_NBANKS 4
struct chmc_bank_info {
struct chmc *p;
int bank_id;
u64 raw_reg;
int valid;
int uk;
int um;
int lk;
int lm;
int interleave;
unsigned long base;
unsigned long size;
};
struct chmc {
struct list_head list;
int portid;
struct chmc_obp_mem_layout layout_prop;
int layout_size;
void __iomem *regs;
u64 timing_control1;
u64 timing_control2;
u64 timing_control3;
u64 timing_control4;
u64 memaddr_control;
struct chmc_bank_info logical_banks[CHMCTRL_NBANKS];
};
#define JBUSMC_REGS_SIZE 8
#define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL
#define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL
#define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL
#define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL
#define JB_MC_REG1_XOR 0x0000010000000000UL
#define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL
#define JB_MC_REG1_ADDR_GEN_2_SHIFT 37
#define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL
#define JB_MC_REG1_ADDR_GEN_1_SHIFT 34
#define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL
#define JB_MC_REG1_INTERLEAVE_SHIFT 23
#define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL
#define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21
#define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL
#define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20
#define PART_TYPE_X8 0
#define PART_TYPE_X4 1
#define INTERLEAVE_NONE 0
#define INTERLEAVE_SAME 1
#define INTERLEAVE_INTERNAL 2
#define INTERLEAVE_BOTH 3
#define ADDR_GEN_128MB 0
#define ADDR_GEN_256MB 1
#define ADDR_GEN_512MB 2
#define ADDR_GEN_1GB 3
#define JB_NUM_DIMM_GROUPS 2
#define JB_NUM_DIMMS_PER_GROUP 2
#define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP)
struct jbusmc_obp_map {
unsigned char dimm_map[18];
unsigned char pin_map[144];
};
struct jbusmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct jbusmc_obp_map map;
char _pad;
};
struct jbusmc_dimm_group {
struct jbusmc *controller;
int index;
u64 base_addr;
u64 size;
};
struct jbusmc {
void __iomem *regs;
u64 mc_reg_1;
u32 portid;
struct jbusmc_obp_mem_layout layout;
int layout_len;
int num_dimm_groups;
struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS];
struct list_head list;
};
static DEFINE_SPINLOCK(mctrl_list_lock);
static LIST_HEAD(mctrl_list);
static void mc_list_add(struct list_head *list)
{
spin_lock(&mctrl_list_lock);
list_add(list, &mctrl_list);
spin_unlock(&mctrl_list_lock);
}
static void mc_list_del(struct list_head *list)
{
spin_lock(&mctrl_list_lock);
list_del_init(list);
spin_unlock(&mctrl_list_lock);
}
#define SYNDROME_MIN -1
#define SYNDROME_MAX 144
/* Convert a syndrome code into the way the bits are positioned
* on the bus.
*/
static int syndrome_to_qword_code(int syndrome_code)
{
if (syndrome_code < 128)
syndrome_code += 16;
else if (syndrome_code < 128 + 9)
syndrome_code -= (128 - 7);
else if (syndrome_code < (128 + 9 + 3))
syndrome_code -= (128 + 9 - 4);
else
syndrome_code -= (128 + 9 + 3);
return syndrome_code;
}
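/* The resulting qword code is a bit position within one 144-bit bus
 * cycle: positions 16-143 carry the 128 data bits, 7-15 the nine ECC
 * check bits, and (by inference from the mapping above) the remaining
 * low positions the MTAG and MTAG-ECC bits.
 */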
/* All this magic has to do with how a cache line comes over the wire
* on Safari and JBUS. A 64-bit line comes over in 1 or more quadword
* cycles, each of which transmit ECC/MTAG info as well as the actual
* data.
*/
#define L2_LINE_SIZE 64
#define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1)
#define QW_PER_LINE 4
#define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE)
#define QW_BITS 144
#define SAFARI_LAST_BIT (576 - 1)
#define JBUS_LAST_BIT (144 - 1)
static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr,
int *pin_p, char **dimm_str_p, void *_prop,
int base_dimm_offset)
{
int qword_code = syndrome_to_qword_code(syndrome_code);
int cache_line_offset;
int offset_inverse;
int dimm_map_index;
int map_val;
if (mc_type == MC_TYPE_JBUS) {
struct jbusmc_obp_mem_layout *p = _prop;
/* JBUS */
cache_line_offset = qword_code;
offset_inverse = (JBUS_LAST_BIT - cache_line_offset);
dimm_map_index = offset_inverse / 8;
map_val = p->map.dimm_map[dimm_map_index];
map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1);
*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
*pin_p = p->map.pin_map[cache_line_offset];
} else {
struct chmc_obp_mem_layout *p = _prop;
struct chmc_obp_map *mp;
int qword;
/* Safari */
if (p->symmetric)
mp = &p->map[0];
else
mp = &p->map[1];
qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES;
cache_line_offset = ((3 - qword) * QW_BITS) + qword_code;
offset_inverse = (SAFARI_LAST_BIT - cache_line_offset);
dimm_map_index = offset_inverse >> 2;
map_val = mp->dimm_map[dimm_map_index];
map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3);
*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
*pin_p = mp->pin_map[cache_line_offset];
}
}
static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr)
{
struct jbusmc *p;
list_for_each_entry(p, &mctrl_list, list) {
int i;
for (i = 0; i < p->num_dimm_groups; i++) {
struct jbusmc_dimm_group *dp = &p->dimm_groups[i];
if (phys_addr < dp->base_addr ||
(dp->base_addr + dp->size) <= phys_addr)
continue;
return dp;
}
}
return NULL;
}
static int jbusmc_print_dimm(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
struct jbusmc_obp_mem_layout *prop;
struct jbusmc_dimm_group *dp;
struct jbusmc *p;
int first_dimm;
dp = jbusmc_find_dimm_group(phys_addr);
if (dp == NULL ||
syndrome_code < SYNDROME_MIN ||
syndrome_code > SYNDROME_MAX) {
buf[0] = '?';
buf[1] = '?';
buf[2] = '?';
buf[3] = '\0';
return 0;
}
p = dp->controller;
prop = &p->layout;
first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP;
if (syndrome_code != SYNDROME_MIN) {
char *dimm_str;
int pin;
get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
&dimm_str, prop, first_dimm);
sprintf(buf, "%s, pin %3d", dimm_str, pin);
} else {
int dimm;
/* Multi-bit error, we just dump out all the
* dimm labels associated with this dimm group.
*/
for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) {
sprintf(buf, "%s ",
prop->dimm_labels[first_dimm + dimm]);
buf += strlen(buf);
}
}
return 0;
}
static u64 jbusmc_dimm_group_size(u64 base,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
u64 max = base + (8UL * 1024 * 1024 * 1024);
u64 max_seen = base;
int i;
for (i = 0; i < num_mem_regs; i++) {
const struct linux_prom64_registers *ent;
u64 this_base;
u64 this_end;
ent = &mem_regs[i];
this_base = ent->phys_addr;
this_end = this_base + ent->reg_size;
if (base < this_base || base >= this_end)
continue;
if (this_end > max)
this_end = max;
if (this_end > max_seen)
max_seen = this_end;
}
return max_seen - base;
}
static void jbusmc_construct_one_dimm_group(struct jbusmc *p,
unsigned long index,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
struct jbusmc_dimm_group *dp = &p->dimm_groups[index];
dp->controller = p;
dp->index = index;
dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024));
dp->base_addr += (index * (8UL * 1024 * 1024 * 1024));
dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs);
}
static void jbusmc_construct_dimm_groups(struct jbusmc *p,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) {
jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs);
p->num_dimm_groups++;
}
if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) {
jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs);
p->num_dimm_groups++;
}
}
static int jbusmc_probe(struct platform_device *op)
{
const struct linux_prom64_registers *mem_regs;
struct device_node *mem_node;
int err, len, num_mem_regs;
struct jbusmc *p;
const u32 *prop;
const void *ml;
err = -ENODEV;
mem_node = of_find_node_by_path("/memory");
if (!mem_node) {
printk(KERN_ERR PFX "Cannot find /memory node.\n");
goto out;
}
mem_regs = of_get_property(mem_node, "reg", &len);
if (!mem_regs) {
printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n");
goto out;
}
num_mem_regs = len / sizeof(*mem_regs);
err = -ENOMEM;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n");
goto out;
}
INIT_LIST_HEAD(&p->list);
err = -ENODEV;
prop = of_get_property(op->dev.of_node, "portid", &len);
if (!prop || len != 4) {
printk(KERN_ERR PFX "Cannot find portid.\n");
goto out_free;
}
p->portid = *prop;
prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len);
if (!prop || len != 8) {
printk(KERN_ERR PFX "Cannot get memory control register 1.\n");
goto out_free;
}
p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1];
err = -ENOMEM;
p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc");
if (!p->regs) {
printk(KERN_ERR PFX "Cannot map jbusmc regs.\n");
goto out_free;
}
err = -ENODEV;
ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len);
if (!ml) {
printk(KERN_ERR PFX "Cannot get memory layout property.\n");
goto out_iounmap;
}
if (p->layout_len > sizeof(p->layout)) {
printk(KERN_ERR PFX "Unexpected memory-layout size %d\n",
p->layout_len);
goto out_iounmap;
}
memcpy(&p->layout, ml, p->layout_len);
jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs);
mc_list_add(&p->list);
printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %pOF\n",
op->dev.of_node);
dev_set_drvdata(&op->dev, p);
err = 0;
out:
return err;
out_iounmap:
of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
out_free:
kfree(p);
goto out;
}
/* Does BANK decode PHYS_ADDR? */
static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr)
{
unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;
/* Bank must be enabled to match. */
if (bp->valid == 0)
return 0;
/* Would BANK match upper bits? */
upper_bits ^= bp->um; /* What bits are different? */
upper_bits = ~upper_bits; /* Invert. */
upper_bits |= bp->uk; /* What bits don't matter for matching? */
upper_bits = ~upper_bits; /* Invert. */
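/* Net effect: upper_bits = (phys ^ um) & ~uk -- nonzero iff some
 * bit that is not masked out by UK fails to match.
 */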
if (upper_bits)
return 0;
/* Would BANK match lower bits? */
lower_bits ^= bp->lm; /* What bits are different? */
lower_bits = ~lower_bits; /* Invert. */
lower_bits |= bp->lk; /* What bits don't matter for matching? */
lower_bits = ~lower_bits; /* Invert. */
if (lower_bits)
return 0;
/* I always knew you'd be the one. */
return 1;
}
/* Given PHYS_ADDR, search memory controller banks for a match. */
static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr)
{
struct chmc *p;
list_for_each_entry(p, &mctrl_list, list) {
int bank_no;
for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) {
struct chmc_bank_info *bp;
bp = &p->logical_banks[bank_no];
if (chmc_bank_match(bp, phys_addr))
return bp;
}
}
return NULL;
}
/* This is the main purpose of this driver. */
static int chmc_print_dimm(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
struct chmc_bank_info *bp;
struct chmc_obp_mem_layout *prop;
int bank_in_controller, first_dimm;
bp = chmc_find_bank(phys_addr);
if (bp == NULL ||
syndrome_code < SYNDROME_MIN ||
syndrome_code > SYNDROME_MAX) {
buf[0] = '?';
buf[1] = '?';
buf[2] = '?';
buf[3] = '\0';
return 0;
}
prop = &bp->p->layout_prop;
bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
first_dimm *= CHMCTRL_NDIMMS;
if (syndrome_code != SYNDROME_MIN) {
char *dimm_str;
int pin;
get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
&dimm_str, prop, first_dimm);
sprintf(buf, "%s, pin %3d", dimm_str, pin);
} else {
int dimm;
/* Multi-bit error, we just dump out all the
* dimm labels associated with this bank.
*/
for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
sprintf(buf, "%s ",
prop->dimm_labels[first_dimm + dimm]);
buf += strlen(buf);
}
}
return 0;
}
/* Accessing the registers is slightly complicated. If you want
* to get at the memory controller which is on the same processor
* the code is executing, you must use special ASI load/store else
* you go through the global mapping.
*/
static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset)
{
unsigned long ret, this_cpu;
preempt_disable();
this_cpu = real_hard_smp_processor_id();
if (p->portid == this_cpu) {
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (offset), "i" (ASI_MCU_CTRL_REG));
} else {
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (p->regs + offset),
"i" (ASI_PHYS_BYPASS_EC_E));
}
preempt_enable();
return ret;
}
#if 0 /* currently unused */
static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val)
{
if (p->portid == smp_processor_id()) {
__asm__ __volatile__("stxa %0, [%1] %2"
: : "r" (val),
"r" (offset), "i" (ASI_MCU_CTRL_REG));
} else {
__asm__ __volatile__("ldxa %0, [%1] %2"
: : "r" (val),
"r" (p->regs + offset),
"i" (ASI_PHYS_BYPASS_EC_E));
}
}
#endif
static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val)
{
struct chmc_bank_info *bp = &p->logical_banks[which_bank];
bp->p = p;
bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank;
bp->raw_reg = val;
bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;
bp->base = (bp->um);
bp->base &= ~(bp->uk);
bp->base <<= PA_UPPER_BITS_SHIFT;
switch(bp->lk) {
case 0xf:
default:
bp->interleave = 1;
break;
case 0xe:
bp->interleave = 2;
break;
case 0xc:
bp->interleave = 4;
break;
case 0x8:
bp->interleave = 8;
break;
case 0x0:
bp->interleave = 16;
break;
}
/* UK[10] is reserved, and UK[11] is not set for the SDRAM
* bank size definition.
*/
bp->size = (((unsigned long)bp->uk &
((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
bp->size /= bp->interleave;
}
static void chmc_fetch_decode_regs(struct chmc *p)
{
if (p->layout_size == 0)
return;
chmc_interpret_one_decode_reg(p, 0,
chmc_read_mcreg(p, CHMCTRL_DECODE1));
chmc_interpret_one_decode_reg(p, 1,
chmc_read_mcreg(p, CHMCTRL_DECODE2));
chmc_interpret_one_decode_reg(p, 2,
chmc_read_mcreg(p, CHMCTRL_DECODE3));
chmc_interpret_one_decode_reg(p, 3,
chmc_read_mcreg(p, CHMCTRL_DECODE4));
}
static int chmc_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
unsigned long ver;
const void *pval;
int len, portid;
struct chmc *p;
int err;
err = -ENODEV;
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID)
goto out;
portid = of_getintprop_default(dp, "portid", -1);
if (portid == -1)
goto out;
pval = of_get_property(dp, "memory-layout", &len);
if (pval && len > sizeof(p->layout_prop)) {
printk(KERN_ERR PFX "Unexpected memory-layout property "
"size %d.\n", len);
goto out;
}
err = -ENOMEM;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Could not allocate struct chmc.\n");
goto out;
}
p->portid = portid;
p->layout_size = len;
if (!pval)
p->layout_size = 0;
else
memcpy(&p->layout_prop, pval, len);
p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc");
if (!p->regs) {
printk(KERN_ERR PFX "Could not map registers.\n");
goto out_free;
}
if (p->layout_size != 0UL) {
p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1);
p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2);
p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3);
p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4);
p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL);
}
chmc_fetch_decode_regs(p);
mc_list_add(&p->list);
printk(KERN_INFO PFX "UltraSPARC-III memory controller at %pOF [%s]\n",
dp,
(p->layout_size ? "ACTIVE" : "INACTIVE"));
dev_set_drvdata(&op->dev, p);
err = 0;
out:
return err;
out_free:
kfree(p);
goto out;
}
static int us3mc_probe(struct platform_device *op)
{
if (mc_type == MC_TYPE_SAFARI)
return chmc_probe(op);
else if (mc_type == MC_TYPE_JBUS)
return jbusmc_probe(op);
return -ENODEV;
}
static void chmc_destroy(struct platform_device *op, struct chmc *p)
{
list_del(&p->list);
of_iounmap(&op->resource[0], p->regs, 0x48);
kfree(p);
}
static void jbusmc_destroy(struct platform_device *op, struct jbusmc *p)
{
mc_list_del(&p->list);
of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
kfree(p);
}
static int us3mc_remove(struct platform_device *op)
{
void *p = dev_get_drvdata(&op->dev);
if (p) {
if (mc_type == MC_TYPE_SAFARI)
chmc_destroy(op, p);
else if (mc_type == MC_TYPE_JBUS)
jbusmc_destroy(op, p);
}
return 0;
}
static const struct of_device_id us3mc_match[] = {
{
.name = "memory-controller",
},
{},
};
MODULE_DEVICE_TABLE(of, us3mc_match);
static struct platform_driver us3mc_driver = {
.driver = {
.name = "us3mc",
.of_match_table = us3mc_match,
},
.probe = us3mc_probe,
.remove = us3mc_remove,
};
static inline bool us3mc_platform(void)
{
if (tlb_type == cheetah || tlb_type == cheetah_plus)
return true;
return false;
}
static int __init us3mc_init(void)
{
unsigned long ver;
int ret;
if (!us3mc_platform())
return -ENODEV;
__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID) {
mc_type = MC_TYPE_JBUS;
us3mc_dimm_printer = jbusmc_print_dimm;
} else {
mc_type = MC_TYPE_SAFARI;
us3mc_dimm_printer = chmc_print_dimm;
}
ret = register_dimm_printer(us3mc_dimm_printer);
if (!ret) {
ret = platform_driver_register(&us3mc_driver);
if (ret)
unregister_dimm_printer(us3mc_dimm_printer);
}
return ret;
}
static void __exit us3mc_cleanup(void)
{
if (us3mc_platform()) {
unregister_dimm_printer(us3mc_dimm_printer);
platform_driver_unregister(&us3mc_driver);
}
}
module_init(us3mc_init);
module_exit(us3mc_cleanup);
| linux-master | arch/sparc/kernel/chmc.c |
// SPDX-License-Identifier: GPL-2.0
/* irq.c: UltraSparc IRQ handling/init/registry.
*
* Copyright (C) 1997, 2007, 2008 David S. Miller ([email protected])
* Copyright (C) 1998 Eddie C. Dost ([email protected])
* Copyright (C) 1998 Jakub Jelinek ([email protected])
*/
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
#include <asm/softirq_stack.h>
#include "entry.h"
#include "cpumap.h"
#include "kstack.h"
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
/* On several sun4u processors, it is illegal to mix bypass and
* non-bypass accesses. Therefore we access all INO buckets
* using bypass accesses only.
*/
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
__irq_chain_pa)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
__asm__ __volatile__("stxa %%g0, [%0] %1"
: /* no outputs */
: "r" (bucket_pa +
offsetof(struct ino_bucket,
__irq_chain_pa)),
"i" (ASI_PHYS_USE_EC));
}
static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
unsigned int ret;
__asm__ __volatile__("lduwa [%1] %2, %0"
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
__irq)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
__asm__ __volatile__("stwa %0, [%1] %2"
: /* no outputs */
: "r" (irq),
"r" (bucket_pa +
offsetof(struct ino_bucket,
__irq)),
"i" (ASI_PHYS_USE_EC));
}
#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
static unsigned long hvirq_major __initdata;
static int __init early_hvirq_major(char *p)
{
int rc = kstrtoul(p, 10, &hvirq_major);
return rc;
}
early_param("hvirq", early_hvirq_major);
static int hv_irq_version;
/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
* based interfaces, but:
*
* 1) Several OSs, Solaris and Linux included, use them even when only
* negotiating version 1.0 (or failing to negotiate at all). So the
* hypervisor has a workaround that provides the VIRQ interfaces even
* when only verion 1.0 of the API is in use.
*
* 2) Second, and more importantly, with major version 2.0 these VIRQ
* interfaces only were actually hooked up for LDC interrupts, even
* though the Hypervisor specification clearly stated:
*
* The new interrupt API functions will be available to a guest
* when it negotiates version 2.0 in the interrupt API group 0x2. When
* a guest negotiates version 2.0, all interrupt sources will only
* support using the cookie interface, and any attempt to use the
* version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
* ENOTSUPPORTED error being returned.
*
* with an emphasis on "all interrupt sources".
*
* To correct this, major version 3.0 was created which does actually
* support VIRQs for all interrupt sources (not just LDC devices). So
* if we want to move completely over the cookie based VIRQs we must
* negotiate major version 3.0 or later of HV_GRP_INTR.
*/
static bool sun4v_cookie_only_virqs(void)
{
if (hv_irq_version >= 3)
return true;
return false;
}
static void __init irq_init_hv(void)
{
unsigned long hv_error, major, minor = 0;
if (tlb_type != hypervisor)
return;
if (hvirq_major)
major = hvirq_major;
else
major = 3;
hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
if (!hv_error)
hv_irq_version = major;
else
hv_irq_version = 1;
pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
hv_irq_version,
sun4v_cookie_only_virqs() ? "enabled" : "disabled");
}
/* This function is for the timer interrupt. */
int __init arch_probe_nr_irqs(void)
{
return 1;
}
#define DEFAULT_NUM_IVECS (0xfffU)
static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
#define NUM_IVECS (nr_ivec)
static unsigned int __init size_nr_ivec(void)
{
if (tlb_type == hypervisor) {
switch (sun4v_chip_type) {
/* Athena's devhandle|devino is large. */
case SUN4V_CHIP_SPARC64X:
nr_ivec = 0xffff;
break;
}
}
return nr_ivec;
}
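/* Per-IRQ bookkeeping. A sun4v source is named either by a
 * (dev_handle, dev_ino) pair (cookie/VIRQ API) or by the composite
 * sysino (legacy API); sun4u sources instead use the iclr/imap
 * register addresses below. The embedded ino_bucket is the object a
 * cookie-based delivery points back at.
 */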
struct irq_handler_data {
union {
struct {
unsigned int dev_handle;
unsigned int dev_ino;
};
unsigned long sysino;
};
struct ino_bucket bucket;
unsigned long iclr;
unsigned long imap;
};
static inline unsigned int irq_data_to_handle(struct irq_data *data)
{
struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
return ihd->dev_handle;
}
static inline unsigned int irq_data_to_ino(struct irq_data *data)
{
struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
return ihd->dev_ino;
}
static inline unsigned long irq_data_to_sysino(struct irq_data *data)
{
struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
return ihd->sysino;
}
void irq_free(unsigned int irq)
{
void *data = irq_get_handler_data(irq);
kfree(data);
irq_set_handler_data(irq, NULL);
irq_free_descs(irq, 1);
}
unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
int irq;
irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
if (irq <= 0)
goto out;
return irq;
out:
return 0;
}
static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
{
unsigned long hv_err, cookie;
struct ino_bucket *bucket;
unsigned int irq = 0U;
hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
if (hv_err) {
pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
goto out;
}
if (cookie & ((1UL << 63UL))) {
cookie = ~cookie;
bucket = (struct ino_bucket *) __va(cookie);
irq = bucket->__irq;
}
out:
return irq;
}
static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
{
unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
struct ino_bucket *bucket;
unsigned int irq;
bucket = &ivector_table[sysino];
irq = bucket_get_irq(__pa(bucket));
return irq;
}
void ack_bad_irq(unsigned int irq)
{
pr_crit("BAD IRQ ack %d\n", irq);
}
void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
pr_warn("IRQ pre handler NOT supported.\n");
}
/*
* /proc/interrupts printing:
*/
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
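/* Compute the target-ID field for a sun4u IMAP register. The encoding
 * depends on the bus: Starfire needs a translation, and JBUS and
 * Safari (agent/node IDs) differ from plain UPA.
 */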
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
unsigned int tid;
if (this_is_starfire) {
tid = starfire_translate(imap, cpuid);
tid <<= IMAP_TID_SHIFT;
tid &= IMAP_TID_UPA;
} else {
if (tlb_type == cheetah || tlb_type == cheetah_plus) {
unsigned long ver;
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID) {
tid = cpuid << IMAP_TID_SHIFT;
tid &= IMAP_TID_JBUS;
} else {
unsigned int a = cpuid & 0x1f;
unsigned int n = (cpuid >> 5) & 0x1f;
tid = ((a << IMAP_AID_SHIFT) |
(n << IMAP_NID_SHIFT));
tid &= (IMAP_AID_SAFARI |
IMAP_NID_SAFARI);
}
} else {
tid = cpuid << IMAP_TID_SHIFT;
tid &= IMAP_TID_UPA;
}
}
return tid;
}
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
cpumask_t mask;
int cpuid;
cpumask_copy(&mask, affinity);
if (cpumask_equal(&mask, cpu_online_mask)) {
cpuid = map_to_cpu(irq);
} else {
cpumask_t tmp;
cpumask_and(&tmp, cpu_online_mask, &mask);
cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
}
return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
real_hard_smp_processor_id()
#endif
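/* Enabling a sun4u interrupt programs the chosen CPU's TID into the
 * IMAP register, sets the Valid bit, and idles the ICLR state machine
 * so the next interrupt can be sent.
 */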
static void sun4u_irq_enable(struct irq_data *data)
{
struct irq_handler_data *handler_data;
handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
cpuid = irq_choose_cpu(data->irq,
irq_data_get_affinity_mask(data));
imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
val = upa_readq(imap);
val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
}
static int sun4u_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
struct irq_handler_data *handler_data;
handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
cpuid = irq_choose_cpu(data->irq, mask);
imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
val = upa_readq(imap);
val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
return 0;
}
/* Don't do anything. The desc->status check for IRQ_DISABLED in
* handler_irq() will skip the handler call and that will leave the
* interrupt in the sent state. The next ->enable() call will hit the
* ICLR register to reset the state machine.
*
* This scheme is necessary, instead of clearing the Valid bit in the
* IMAP register, to handle the case of IMAP registers being shared by
* multiple INOs (and thus ICLR registers). Since we use a different
* virtual IRQ for each shared IMAP instance, the generic code thinks
* there is only one user so it prematurely calls ->disable() on
* free_irq().
*
* We have to provide an explicit ->disable() method instead of using
* NULL to get the default. The reason is that if the generic code
* sees that, it also hooks up a default ->shutdown method which
* invokes ->mask() which we do not want. See irq_chip_set_defaults().
*/
static void sun4u_irq_disable(struct irq_data *data)
{
}
static void sun4u_irq_eoi(struct irq_data *data)
{
struct irq_handler_data *handler_data;
handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data))
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
static void sun4v_irq_enable(struct irq_data *data)
{
unsigned long cpuid = irq_choose_cpu(data->irq,
irq_data_get_affinity_mask(data));
unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
ino, err);
}
static int sun4v_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
unsigned long cpuid = irq_choose_cpu(data->irq, mask);
unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);
return 0;
}
static void sun4v_irq_disable(struct irq_data *data)
{
unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setenabled(%x): "
"err(%d)\n", ino, err);
}
static void sun4v_irq_eoi(struct irq_data *data)
{
unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
}
static void sun4v_virq_enable(struct irq_data *data)
{
unsigned long dev_handle = irq_data_to_handle(data);
unsigned long dev_ino = irq_data_to_ino(data);
unsigned long cpuid;
int err;
cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_ENABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_ENABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
static int sun4v_virt_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
unsigned long dev_handle = irq_data_to_handle(data);
unsigned long dev_ino = irq_data_to_ino(data);
unsigned long cpuid;
int err;
cpuid = irq_choose_cpu(data->irq, mask);
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
return 0;
}
static void sun4v_virq_disable(struct irq_data *data)
{
unsigned long dev_handle = irq_data_to_handle(data);
unsigned long dev_ino = irq_data_to_ino(data);
int err;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_DISABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
static void sun4v_virq_eoi(struct irq_data *data)
{
unsigned long dev_handle = irq_data_to_handle(data);
unsigned long dev_ino = irq_data_to_ino(data);
int err;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
}
static struct irq_chip sun4u_irq = {
.name = "sun4u",
.irq_enable = sun4u_irq_enable,
.irq_disable = sun4u_irq_disable,
.irq_eoi = sun4u_irq_eoi,
.irq_set_affinity = sun4u_set_affinity,
.flags = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_irq = {
.name = "sun4v",
.irq_enable = sun4v_irq_enable,
.irq_disable = sun4v_irq_disable,
.irq_eoi = sun4v_irq_eoi,
.irq_set_affinity = sun4v_set_affinity,
.flags = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_virq = {
.name = "vsun4v",
.irq_enable = sun4v_virq_enable,
.irq_disable = sun4v_virq_disable,
.irq_eoi = sun4v_virq_eoi,
.irq_set_affinity = sun4v_virt_set_affinity,
.flags = IRQCHIP_EOI_IF_HANDLED,
};
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
struct irq_handler_data *handler_data;
struct ino_bucket *bucket;
unsigned int irq;
int ino;
BUG_ON(tlb_type == hypervisor);
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
irq = bucket_get_irq(__pa(bucket));
if (!irq) {
irq = irq_alloc(0, ino);
bucket_set_irq(__pa(bucket), irq);
irq_set_chip_and_handler_name(irq, &sun4u_irq,
handle_fasteoi_irq, "IVEC");
}
handler_data = irq_get_handler_data(irq);
if (unlikely(handler_data))
goto out;
handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
irq_set_handler_data(irq, handler_data);
handler_data->imap = imap;
handler_data->iclr = iclr;
out:
return irq;
}
static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
void (*handler_data_init)(struct irq_handler_data *data,
u32 devhandle, unsigned int devino),
struct irq_chip *chip)
{
struct irq_handler_data *data;
unsigned int irq;
irq = irq_alloc(devhandle, devino);
if (!irq)
goto out;
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data)) {
pr_err("IRQ handler data allocation failed.\n");
irq_free(irq);
irq = 0;
goto out;
}
irq_set_handler_data(irq, data);
handler_data_init(data, devhandle, devino);
irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
data->imap = ~0UL;
data->iclr = ~0UL;
out:
return irq;
}
static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
unsigned int devino)
{
struct irq_handler_data *ihd = irq_get_handler_data(irq);
unsigned long hv_error, cookie;
	/* handler_irq needs to find the irq.  The cookie is seen as
	 * signed in sun4v_dev_mondo and treated as a non-ivector_table
	 * delivery.
	 */
ihd->bucket.__irq = irq;
cookie = ~__pa(&ihd->bucket);
hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
if (hv_error)
pr_err("HV vintr set cookie failed = %ld\n", hv_error);
return hv_error;
}
static void cookie_handler_data(struct irq_handler_data *data,
u32 devhandle, unsigned int devino)
{
data->dev_handle = devhandle;
data->dev_ino = devino;
}
static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
struct irq_chip *chip)
{
unsigned long hv_error;
unsigned int irq;
irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
hv_error = cookie_assign(irq, devhandle, devino);
if (hv_error) {
irq_free(irq);
irq = 0;
}
return irq;
}
static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
unsigned int irq;
irq = cookie_exists(devhandle, devino);
if (irq)
goto out;
irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
out:
return irq;
}
static void sysino_set_bucket(unsigned int irq)
{
struct irq_handler_data *ihd = irq_get_handler_data(irq);
struct ino_bucket *bucket;
unsigned long sysino;
sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
BUG_ON(sysino >= nr_ivec);
bucket = &ivector_table[sysino];
bucket_set_irq(__pa(bucket), irq);
}
static void sysino_handler_data(struct irq_handler_data *data,
u32 devhandle, unsigned int devino)
{
unsigned long sysino;
sysino = sun4v_devino_to_sysino(devhandle, devino);
data->sysino = sysino;
}
static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
struct irq_chip *chip)
{
unsigned int irq;
irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
if (!irq)
goto out;
sysino_set_bucket(irq);
out:
return irq;
}
static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
{
int irq;
irq = sysino_exists(devhandle, devino);
if (irq)
goto out;
irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
out:
return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
unsigned int irq;
if (sun4v_cookie_only_virqs())
irq = sun4v_build_cookie(devhandle, devino);
else
irq = sun4v_build_sysino(devhandle, devino);
return irq;
}
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
int irq;
irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
if (!irq)
goto out;
/* This is borrowed from the original function.
*/
irq_set_status_flags(irq, IRQ_NOAUTOEN);
out:
return irq;
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
void *orig_sp;
clear_softint(1 << pil);
old_regs = set_irq_regs(regs);
irq_enter();
/* Grab an atomic snapshot of the pending IVECs. */
__asm__ __volatile__("rdpr %%pstate, %0\n\t"
"wrpr %0, %3, %%pstate\n\t"
"ldx [%2], %1\n\t"
"stx %%g0, [%2]\n\t"
"wrpr %0, 0x0, %%pstate\n\t"
: "=&r" (pstate), "=&r" (bucket_pa)
: "r" (irq_work_pa(smp_processor_id())),
"i" (PSTATE_IE)
: "memory");
orig_sp = set_hardirq_stack();
while (bucket_pa) {
unsigned long next_pa;
unsigned int irq;
next_pa = bucket_get_chain_pa(bucket_pa);
irq = bucket_get_irq(bucket_pa);
bucket_clear_chain_pa(bucket_pa);
generic_handle_irq(irq);
bucket_pa = next_pa;
}
restore_hardirq_stack(orig_sp);
irq_exit();
set_irq_regs(old_regs);
}
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
sp += THREAD_SIZE - 192 - STACK_BIAS;
__asm__ __volatile__("mov %%sp, %0\n\t"
"mov %1, %%sp"
: "=&r" (orig_sp)
: "r" (sp));
__do_softirq();
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
#endif
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
unsigned int irq;
for (irq = 0; irq < NR_IRQS; irq++) {
struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *data;
unsigned long flags;
if (!desc)
continue;
data = irq_desc_get_irq_data(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && !irqd_is_per_cpu(data)) {
if (data->chip->irq_set_affinity)
data->chip->irq_set_affinity(data,
irq_data_get_affinity_mask(data),
false);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
tick_ops->disable_irq();
}
#endif
struct sun5_timer {
u64 count0;
u64 limit0;
u64 count1;
u64 limit1;
};
static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void)
{
struct device_node *dp;
const unsigned int *addr;
/* PROM timer node hangs out in the top level of device siblings... */
dp = of_find_node_by_path("/");
dp = dp->child;
while (dp) {
if (of_node_name_eq(dp, "counter-timer"))
break;
dp = dp->sibling;
}
	/* Assume that if the node is not present, the PROM uses a
	 * different tick mechanism which we need not care about.
*/
if (!dp) {
		prom_timers = NULL;
return;
}
	/* If the PROM is really using this, it must have mapped it itself. */
addr = of_get_property(dp, "address", NULL);
if (!addr) {
prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = NULL;
return;
}
prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
if (!prom_timers)
return;
/* Save them away for later. */
prom_limit0 = prom_timers->limit0;
prom_limit1 = prom_timers->limit1;
	/* Just as on sun4c, the PROM uses a timer which ticks at IRQ 14.
* We turn both off here just to be paranoid.
*/
prom_timers->limit0 = 0;
prom_timers->limit1 = 0;
/* Wheee, eat the interrupt packet too... */
__asm__ __volatile__(
" mov 0x40, %%g2\n"
" ldxa [%%g0] %0, %%g1\n"
" ldxa [%%g2] %1, %%g1\n"
" stxa %%g0, [%%g0] %0\n"
" membar #Sync\n"
: /* no outputs */
: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
: "g1", "g2");
}
void notrace init_irqwork_curcpu(void)
{
int cpu = hard_smp_processor_id();
trap_block[cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and
* sun4v_register_mondo_queues().
*
* On SMP this gets invoked from the CPU trampoline before
* the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
* not fully cooked yet.
*
* Therefore you cannot make any OBP calls, not even prom_printf,
* from these two routines.
*/
static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
unsigned long qmask)
{
unsigned long num_entries = (qmask + 1) / 64;
unsigned long status;
status = sun4v_cpu_qconf(type, paddr, num_entries);
if (status != HV_EOK) {
prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
"err %lu\n", type, paddr, num_entries, status);
prom_halt();
}
}
void notrace sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
tb->cpu_mondo_qmask);
register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
tb->dev_mondo_qmask);
register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
tb->resum_qmask);
register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
tb->nonresum_qmask);
}
/* Each queue region must be a power of 2 multiple of 64 bytes in
* size. The base real address must be aligned to the size of the
* region. Thus, an 8KB queue must be 8KB aligned, for example.
*/
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
unsigned long order = get_order(size);
unsigned long p;
p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
}
*pa_ptr = __pa(p);
}
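/* Illustrative sketch, not part of the original driver: a hypothetical
 * helper (the name queue_region_valid is an assumption) expressing the
 * alignment rule documented above.  __get_free_pages() satisfies it
 * automatically, since page allocations are naturally aligned to their
 * order.
 */
static inline bool __maybe_unused queue_region_valid(unsigned long pa,
						     unsigned long qmask)
{
	unsigned long size = qmask + 1;

	/* Size must be a power-of-2 multiple of 64 bytes... */
	if (size < 64 || (size & (size - 1)))
		return false;

	/* ...and the base real address must be aligned to that size. */
	return (pa & (size - 1)) == 0;
}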
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
unsigned long page;
void *mondo, *p;
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
/* Make sure mondo block is 64byte aligned */
p = kzalloc(127, GFP_KERNEL);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
prom_halt();
}
mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
tb->cpu_list_pa = __pa(page);
#endif
}
/* Allocate mondo and error queues for all possible cpus. */
static void __init sun4v_init_mondo_queues(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct trap_per_cpu *tb = &trap_block[cpu];
alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
alloc_one_queue(&tb->nonresum_kernel_buf_pa,
tb->nonresum_qmask);
}
}
static void __init init_send_mondo_info(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct trap_per_cpu *tb = &trap_block[cpu];
init_cpu_send_mondo_info(tb);
}
}
static struct irqaction timer_irq_action = {
.name = "timer",
};
static void __init irq_ivector_init(void)
{
unsigned long size, order;
unsigned int ivecs;
/* If we are doing cookie only VIRQs then we do not need the ivector
* table to process interrupts.
*/
if (sun4v_cookie_only_virqs())
return;
ivecs = size_nr_ivec();
size = sizeof(struct ino_bucket) * ivecs;
order = get_order(size);
ivector_table = (struct ino_bucket *)
__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
}
__flush_dcache_range((unsigned long) ivector_table,
((unsigned long) ivector_table) + size);
ivector_table_pa = __pa(ivector_table);
}
/* Only invoked on the boot processor. */
void __init init_IRQ(void)
{
irq_init_hv();
irq_ivector_init();
map_prom_timers();
kill_prom_timer();
if (tlb_type == hypervisor)
sun4v_init_mondo_queues();
init_send_mondo_info();
if (tlb_type == hypervisor) {
/* Load up the boot cpu's entries. */
sun4v_register_mondo_queues(hard_smp_processor_id());
}
/* We need to clear any IRQ's pending in the soft interrupt
* registers, a spurious one could be left around from the
* PROM timer which we just disabled.
*/
clear_softint(get_softint());
/* Now that ivector table is initialized, it is safe
* to receive IRQ vector traps. We will normally take
* one or two right now, in case some device PROM used
* to boot us wants to speak to us. We just ignore them.
*/
__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
"or %%g1, %0, %%g1\n\t"
"wrpr %%g1, 0x0, %%pstate"
: /* No outputs */
: "i" (PSTATE_IE)
: "g1");
irq_to_desc(0)->action = &timer_irq_action;
}
| linux-master | arch/sparc/kernel/irq_64.c |
// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/sparc
* platform.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>
#include "entry.h"
#include "kernel.h"
#include "systbls.h"
/* #define DEBUG_UNIMP_SYSCALL */
SYSCALL_DEFINE0(getpagesize)
{
return PAGE_SIZE;
}
/* Does addr --> addr+len fall within 4GB of the VA-space hole or
* overflow past the end of the 64-bit address space?
*/
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
unsigned long va_exclude_start, va_exclude_end;
va_exclude_start = VA_EXCLUDE_START;
va_exclude_end = VA_EXCLUDE_END;
if (unlikely(len >= va_exclude_start))
return 1;
if (unlikely((addr + len) < addr))
return 1;
if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
((addr + len) >= va_exclude_start &&
(addr + len) < va_exclude_end)))
return 1;
return 0;
}
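/* Worked example (illustrative): a request whose len alone reaches
 * VA_EXCLUDE_START fails the first test above, while a request starting
 * just below VA_EXCLUDE_START whose end lands inside the hole fails the
 * third test.
 */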
/* These functions differ from the default implementations in
* mm/mmap.c in two ways:
*
 * 1) For file-backed MAP_SHARED mmap()s we D-cache color align;
 * for MAP_FIXED mappings we just validate what the user gave us.
* 2) For 64-bit tasks we avoid mapping anything within 4GB of
* the spitfire/niagara VA-hole.
*/
static inline unsigned long COLOR_ALIGN(unsigned long addr,
unsigned long pgoff)
{
unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
return base + off;
}
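/* Worked example (illustrative, assuming SHMLBA = 0x4000 and
 * PAGE_SHIFT = 13): addr = 0x12345 and pgoff = 3 give base = 0x14000
 * and off = (3 << 13) & 0x3fff = 0x2000, so the returned address
 * 0x16000 shares its D-cache color with the mapped file offset.
 */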
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
int do_color_align;
struct vm_unmapped_area_info info;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
if (addr) {
if (do_color_align)
addr = COLOR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
VM_BUG_ON(addr != -ENOMEM);
info.low_limit = VA_EXCLUDE_END;
info.high_limit = task_size;
addr = vm_unmapped_area(&info);
}
return addr;
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long task_size = STACK_TOP32;
unsigned long addr = addr0;
int do_color_align;
struct vm_unmapped_area_info info;
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
if (unlikely(len > task_size))
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
/* requesting a specific address */
if (addr) {
if (do_color_align)
addr = COLOR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (addr & ~PAGE_MASK) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = STACK_TOP32;
addr = vm_unmapped_area(&info);
}
return addr;
}
/* Try to align the mapping as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
unsigned long align_goal, addr = -ENOMEM;
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
get_area = current->mm->get_unmapped_area;
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
return get_area(NULL, orig_addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
align_goal = PAGE_SIZE;
if (len >= (4UL * 1024 * 1024))
align_goal = (4UL * 1024 * 1024);
else if (len >= (512UL * 1024))
align_goal = (512UL * 1024);
else if (len >= (64UL * 1024))
align_goal = (64UL * 1024);
do {
addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
}
if (align_goal == (4UL * 1024 * 1024))
align_goal = (512UL * 1024);
else if (align_goal == (512UL * 1024))
align_goal = (64UL * 1024);
else
align_goal = PAGE_SIZE;
} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
	/* The mapping is smaller than 64K, or larger aligned areas
	 * could not be obtained.
*/
if (addr & ~PAGE_MASK)
addr = get_area(NULL, orig_addr, len, pgoff, flags);
return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);
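/* Minimal sketch (an illustration, not kernel code): the descending
 * alignment ladder above can equivalently be written as a table walk.
 * The name fb_align_goal() is hypothetical and exists only to show the
 * idea.
 */
static inline unsigned long __maybe_unused fb_align_goal(unsigned long len)
{
	static const unsigned long goals[] = {
		4UL * 1024 * 1024,	/* 4MB for the largest mappings */
		512UL * 1024,		/* then 512K */
		64UL * 1024,		/* then 64K */
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(goals); i++)
		if (len >= goals[i])
			return goals[i];
	return PAGE_SIZE;
}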
/* Essentially the same as PowerPC. */
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
unsigned long val = get_random_long();
if (test_thread_flag(TIF_32BIT))
rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
else
rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
}
return rnd << PAGE_SHIFT;
}
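/* Illustrative note (assuming 8KB pages, PAGE_SHIFT = 13): the
 * randomization above spans up to 8MB (2^23 bytes) of VA for 32-bit
 * tasks and up to 1GB (2^30 bytes) for 64-bit tasks, always in
 * page-sized steps.
 */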
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
unsigned long random_factor = mmap_rnd();
unsigned long gap;
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
gap = rlim_stack->rlim_cur;
if (!test_thread_flag(TIF_32BIT) ||
(current->personality & ADDR_COMPAT_LAYOUT) ||
gap == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
/* We know it's 32-bit */
unsigned long task_size = STACK_TOP32;
if (gap < 128 * 1024 * 1024)
gap = 128 * 1024 * 1024;
if (gap > (task_size / 6 * 5))
gap = (task_size / 6 * 5);
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
SYSCALL_DEFINE0(sparc_pipe)
{
int fd[2];
int error;
error = do_pipe_flags(fd, 0);
if (error)
goto out;
current_pt_regs()->u_regs[UREG_I1] = fd[1];
error = fd[0];
out:
return error;
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
*
* This is really horribly ugly.
*/
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
unsigned long, third, void __user *, ptr, long, fifth)
{
long err;
if (!IS_ENABLED(CONFIG_SYSVIPC))
return -ENOSYS;
/* No need for backward compatibility. We can start fresh... */
if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
err = ksys_semtimedop(first, ptr,
(unsigned int)second, NULL);
goto out;
case SEMTIMEDOP:
err = ksys_semtimedop(first, ptr, (unsigned int)second,
(const struct __kernel_timespec __user *)
(unsigned long) fifth);
goto out;
case SEMGET:
err = ksys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
err = ksys_old_semctl(first, second,
(int)third | IPC_64,
(unsigned long) ptr);
goto out;
}
default:
err = -ENOSYS;
goto out;
}
}
if (call <= MSGCTL) {
switch (call) {
case MSGSND:
err = ksys_msgsnd(first, ptr, (size_t)second,
(int)third);
goto out;
case MSGRCV:
err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
(int)third);
goto out;
case MSGGET:
err = ksys_msgget((key_t)first, (int)second);
goto out;
case MSGCTL:
err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
goto out;
}
}
if (call <= SHMCTL) {
switch (call) {
case SHMAT: {
ulong raddr;
err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
if (!err) {
if (put_user(raddr,
(ulong __user *) third))
err = -EFAULT;
}
goto out;
}
case SHMDT:
err = ksys_shmdt(ptr);
goto out;
case SHMGET:
err = ksys_shmget(first, (size_t)second, (int)third);
goto out;
case SHMCTL:
err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
goto out;
}
} else {
err = -ENOSYS;
}
out:
return err;
}
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
long ret;
if (personality(current->personality) == PER_LINUX32 &&
personality(personality) == PER_LINUX)
personality |= PER_LINUX32;
ret = sys_personality(personality);
if (personality(ret) == PER_LINUX32)
ret &= ~PER_LINUX32;
return ret;
}
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
if (test_thread_flag(TIF_32BIT)) {
if (len >= STACK_TOP32)
return -EINVAL;
if (addr > STACK_TOP32 - len)
return -EINVAL;
} else {
if (len >= VA_EXCLUDE_START)
return -EINVAL;
if (invalid_64bit_range(addr, len))
return -EINVAL;
}
return 0;
}
/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
unsigned long retval = -EINVAL;
if ((off + PAGE_ALIGN(len)) < off)
goto out;
if (off & ~PAGE_MASK)
goto out;
retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return retval;
}
SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
if (invalid_64bit_range(addr, len))
return -EINVAL;
return vm_munmap(addr, len);
}
SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
unsigned long, new_len, unsigned long, flags,
unsigned long, new_addr)
{
if (test_thread_flag(TIF_32BIT))
return -EINVAL;
return sys_mremap(addr, old_len, new_len, flags, new_addr);
}
SYSCALL_DEFINE0(nis_syscall)
{
static int count;
struct pt_regs *regs = current_pt_regs();
	/* Don't make the system unusable if something gets stuck */
if (count++ > 5)
return -ENOSYS;
printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
show_regs (regs);
#endif
return -ENOSYS;
}
/* #define DEBUG_SPARC_BREAKPOINT */
asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc);
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
exception_exit(prev_state);
}
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
up_read(&uts_sem);
if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem);
return err;
}
SYSCALL_DEFINE1(sparc_adjtimex, struct __kernel_timex __user *, txc_p)
{
struct __kernel_timex txc;
struct __kernel_old_timeval *tv = (void *)&txc.time;
int ret;
	/* Copy the user data space into the kernel copy
	 * structure, but bear in mind that the structures
	 * may change in size.
*/
if (copy_from_user(&txc, txc_p, sizeof(txc)))
return -EFAULT;
/*
	 * override for the sparc64-specific timeval type: tv_usec
	 * is 32 bits wide here instead of 64 bits as in __kernel_timex
*/
txc.time.tv_usec = tv->tv_usec;
ret = do_adjtimex(&txc);
tv->tv_usec = txc.time.tv_usec;
return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
}
SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,
struct __kernel_timex __user *, txc_p)
{
struct __kernel_timex txc;
struct __kernel_old_timeval *tv = (void *)&txc.time;
int ret;
if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
pr_err_once("process %d (%s) attempted a POSIX timer syscall "
"while CONFIG_POSIX_TIMERS is not set\n",
current->pid, current->comm);
return -ENOSYS;
}
	/* Copy the user data space into the kernel copy
	 * structure, but bear in mind that the structures
	 * may change in size.
*/
if (copy_from_user(&txc, txc_p, sizeof(txc)))
return -EFAULT;
/*
	 * override for the sparc64-specific timeval type: tv_usec
	 * is 32 bits wide here instead of 64 bits as in __kernel_timex
*/
txc.time.tv_usec = tv->tv_usec;
ret = do_clock_adjtime(which_clock, &txc);
tv->tv_usec = txc.time.tv_usec;
return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
}
SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
utrap_handler_t, new_p, utrap_handler_t, new_d,
utrap_handler_t __user *, old_p,
utrap_handler_t __user *, old_d)
{
if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
return -EINVAL;
if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
if (old_p) {
if (!current_thread_info()->utraps) {
if (put_user(NULL, old_p))
return -EFAULT;
} else {
if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
return -EFAULT;
}
}
if (old_d) {
if (put_user(NULL, old_d))
return -EFAULT;
}
return 0;
}
if (!current_thread_info()->utraps) {
current_thread_info()->utraps =
kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
GFP_KERNEL);
if (!current_thread_info()->utraps)
return -ENOMEM;
current_thread_info()->utraps[0] = 1;
} else {
if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
current_thread_info()->utraps[0] > 1) {
unsigned long *p = current_thread_info()->utraps;
current_thread_info()->utraps =
kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
sizeof(long),
GFP_KERNEL);
if (!current_thread_info()->utraps) {
current_thread_info()->utraps = p;
return -ENOMEM;
}
p[0]--;
current_thread_info()->utraps[0] = 1;
memcpy(current_thread_info()->utraps+1, p+1,
UT_TRAP_INSTRUCTION_31*sizeof(long));
}
}
if (old_p) {
if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
return -EFAULT;
}
if (old_d) {
if (put_user(NULL, old_d))
return -EFAULT;
}
current_thread_info()->utraps[type] = (long)new_p;
return 0;
}
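/* Note (illustrative): utraps[0] appears to act as a reference count,
 * so a table shared by several threads is only duplicated above, on a
 * modifying install, when more than one reference remains (in effect,
 * copy-on-write for the per-thread utrap vector).
 */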
SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
{
struct pt_regs *regs = current_pt_regs();
if (model >= 3)
return -EINVAL;
regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
return 0;
}
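/* Worked example (assuming the SPARC V9 encoding 0 = TSO, 1 = PSO,
 * 2 = RSO): memory_ordering(1) replaces the TSTATE_MM field with
 * 1 << 14, so the saved %tstate resumes the user process under PSO.
 */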
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
struct sigaction __user *, oact, void __user *, restorer,
size_t, sigsetsize)
{
struct k_sigaction new_ka, old_ka;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (act) {
new_ka.ka_restorer = restorer;
if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
return -EFAULT;
}
return ret;
}
SYSCALL_DEFINE0(kern_features)
{
return KERN_FEATURE_MIXED_MODE_STACK;
}
| linux-master | arch/sparc/kernel/sys_sparc_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* leon_pci_grpci1.c: GRPCI1 Host PCI driver
*
* Copyright (C) 2013 Aeroflex Gaisler AB
*
* This GRPCI1 driver does not support PCI interrupts taken from
 * GPIO pins. Interrupt generation on PCI parity and system error
 * detection is turned off by default, since some GRPCI1 cores do
 * not support such detection. It can be turned on from the bootloader
* using the all_pci_errors property.
*
* Contributors: Daniel Hellstrom <[email protected]>
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/leon_pci.h>
#include <asm/sections.h>
#include <asm/vaddrs.h>
#include <asm/leon.h>
#include <asm/io.h>
#include "irq.h"
/* Enable/Disable Debugging Configuration Space Access */
#undef GRPCI1_DEBUG_CFGACCESS
/*
* GRPCI1 APB Register MAP
*/
struct grpci1_regs {
unsigned int cfg_stat; /* 0x00 Configuration / Status */
unsigned int bar0; /* 0x04 BAR0 (RO) */
unsigned int page0; /* 0x08 PAGE0 (RO) */
unsigned int bar1; /* 0x0C BAR1 (RO) */
unsigned int page1; /* 0x10 PAGE1 */
unsigned int iomap; /* 0x14 IO Map */
unsigned int stat_cmd; /* 0x18 PCI Status & Command (RO) */
unsigned int irq; /* 0x1C Interrupt register */
};
#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
#define PAGE0_BTEN_BIT 0
#define PAGE0_BTEN (1 << PAGE0_BTEN_BIT)
#define CFGSTAT_HOST_BIT 13
#define CFGSTAT_CTO_BIT 8
#define CFGSTAT_HOST (1 << CFGSTAT_HOST_BIT)
#define CFGSTAT_CTO (1 << CFGSTAT_CTO_BIT)
#define IRQ_DPE (1 << 9)
#define IRQ_SSE (1 << 8)
#define IRQ_RMA (1 << 7)
#define IRQ_RTA (1 << 6)
#define IRQ_STA (1 << 5)
#define IRQ_DPED (1 << 4)
#define IRQ_INTD (1 << 3)
#define IRQ_INTC (1 << 2)
#define IRQ_INTB (1 << 1)
#define IRQ_INTA (1 << 0)
#define IRQ_DEF_ERRORS (IRQ_RMA | IRQ_RTA | IRQ_STA)
#define IRQ_ALL_ERRORS (IRQ_DPED | IRQ_DEF_ERRORS | IRQ_SSE | IRQ_DPE)
#define IRQ_INTX (IRQ_INTA | IRQ_INTB | IRQ_INTC | IRQ_INTD)
#define IRQ_MASK_BIT 16
#define DEF_PCI_ERRORS (PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_REC_TARGET_ABORT | \
PCI_STATUS_REC_MASTER_ABORT)
#define ALL_PCI_ERRORS (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_SIG_SYSTEM_ERROR | DEF_PCI_ERRORS)
#define TGT 256
struct grpci1_priv {
struct leon_pci_info info; /* must be on top of this structure */
struct grpci1_regs __iomem *regs; /* GRPCI register map */
struct device *dev;
int pci_err_mask; /* STATUS register error mask */
int irq; /* LEON irqctrl GRPCI IRQ */
unsigned char irq_map[4]; /* GRPCI nexus PCI INTX# IRQs */
unsigned int irq_err; /* GRPCI nexus Virt Error IRQ */
/* AHB PCI Windows */
unsigned long pci_area; /* MEMORY */
unsigned long pci_area_end;
unsigned long pci_io; /* I/O */
unsigned long pci_conf; /* CONFIGURATION */
unsigned long pci_conf_end;
unsigned long pci_io_va;
};
static struct grpci1_priv *grpci1priv;
static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val);
static int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct grpci1_priv *priv = dev->bus->sysdata;
int irq_group;
	/* Use default IRQ decoding on PCI BUS0 according to slot numbering */
irq_group = slot & 0x3;
pin = ((pin - 1) + irq_group) & 0x3;
return priv->irq_map[pin];
}
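/* Worked example (illustrative): a device in slot 1 asserting INTB#
 * (pin = 2) yields irq_group = 1 and pin = ((2 - 1) + 1) & 0x3 = 2,
 * routing it to the INTC# entry irq_map[2], i.e. the usual per-slot
 * INTX# rotation on bus 0.
 */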
static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 *pci_conf, tmp, cfg;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
cfg = REGLOAD(priv->regs->cfg_stat);
REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23));
/* do read access */
pci_conf = (u32 *) (priv->pci_conf | (devfn << 8) | (where & 0xfc));
tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
/* check if master abort was received */
if (REGLOAD(priv->regs->cfg_stat) & CFGSTAT_CTO) {
*val = 0xffffffff;
/* Clear Master abort bit in PCI cfg space (is set) */
tmp = REGLOAD(priv->regs->stat_cmd);
grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp);
} else {
/* Bus always little endian (unaffected by byte-swapping) */
*val = swab32(tmp);
}
return 0;
}
static int grpci1_cfg_r16(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
if (where & 0x1)
return -EINVAL;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xffff & (v >> (8 * (where & 0x3)));
return ret;
}
static int grpci1_cfg_r8(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 *val)
{
u32 v;
int ret;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
*val = 0xff & (v >> (8 * (where & 3)));
return ret;
}
static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
unsigned int *pci_conf;
u32 cfg;
if (where & 0x3)
return -EINVAL;
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
cfg = REGLOAD(priv->regs->cfg_stat);
REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23));
pci_conf = (unsigned int *) (priv->pci_conf |
(devfn << 8) | (where & 0xfc));
LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
return 0;
}
static int grpci1_cfg_w16(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
if (where & 0x1)
return -EINVAL;
ret = grpci1_cfg_r32(priv, bus, devfn, where&~3, &v);
if (ret)
return ret;
v = (v & ~(0xffff << (8 * (where & 0x3)))) |
((0xffff & val) << (8 * (where & 0x3)));
return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
static int grpci1_cfg_w8(struct grpci1_priv *priv, unsigned int bus,
unsigned int devfn, int where, u32 val)
{
int ret;
u32 v;
ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
if (ret != 0)
return ret;
v = (v & ~(0xff << (8 * (where & 0x3)))) |
((0xff & val) << (8 * (where & 0x3)));
return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v);
}
/* Read from Configuration Space. When entering here the PCI layer has taken
 * the pci_lock spinlock and IRQs are off.
*/
static int grpci1_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct grpci1_priv *priv = grpci1priv;
unsigned int busno = bus->number;
int ret;
if (PCI_SLOT(devfn) > 15 || busno > 15) {
*val = ~0;
return 0;
}
switch (size) {
case 1:
ret = grpci1_cfg_r8(priv, busno, devfn, where, val);
break;
case 2:
ret = grpci1_cfg_r16(priv, busno, devfn, where, val);
break;
case 4:
ret = grpci1_cfg_r32(priv, busno, devfn, where, val);
break;
default:
ret = -EINVAL;
break;
}
#ifdef GRPCI1_DEBUG_CFGACCESS
printk(KERN_INFO
"grpci1_read_config: [%02x:%02x:%x] ofs=%d val=%x size=%d\n",
busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, *val, size);
#endif
return ret;
}
/* Write to Configuration Space. When entering here the PCI layer has taken
 * the pci_lock spinlock and IRQs are off.
*/
static int grpci1_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct grpci1_priv *priv = grpci1priv;
unsigned int busno = bus->number;
if (PCI_SLOT(devfn) > 15 || busno > 15)
return 0;
#ifdef GRPCI1_DEBUG_CFGACCESS
printk(KERN_INFO
"grpci1_write_config: [%02x:%02x:%x] ofs=%d size=%d val=%x\n",
busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
#endif
switch (size) {
default:
return -EINVAL;
case 1:
return grpci1_cfg_w8(priv, busno, devfn, where, val);
case 2:
return grpci1_cfg_w16(priv, busno, devfn, where, val);
case 4:
return grpci1_cfg_w32(priv, busno, devfn, where, val);
}
}
static struct pci_ops grpci1_ops = {
.read = grpci1_read_config,
.write = grpci1_write_config,
};
/* GENIRQ IRQ chip implementation for grpci1 irqmode=0..2. In configuration
 * 3, where all PCI interrupts have a separate IRQ on the system IRQ controller,
* this is not needed and the standard IRQ controller can be used.
*/
static void grpci1_mask_irq(struct irq_data *data)
{
u32 irqidx;
struct grpci1_priv *priv = grpci1priv;
irqidx = (u32)data->chip_data - 1;
if (irqidx > 3) /* only mask PCI interrupts here */
return;
irqidx += IRQ_MASK_BIT;
REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) & ~(1 << irqidx));
}
static void grpci1_unmask_irq(struct irq_data *data)
{
u32 irqidx;
struct grpci1_priv *priv = grpci1priv;
irqidx = (u32)data->chip_data - 1;
if (irqidx > 3) /* only unmask PCI interrupts here */
return;
irqidx += IRQ_MASK_BIT;
REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) | (1 << irqidx));
}
static unsigned int grpci1_startup_irq(struct irq_data *data)
{
grpci1_unmask_irq(data);
return 0;
}
static void grpci1_shutdown_irq(struct irq_data *data)
{
grpci1_mask_irq(data);
}
static struct irq_chip grpci1_irq = {
.name = "grpci1",
.irq_startup = grpci1_startup_irq,
.irq_shutdown = grpci1_shutdown_irq,
.irq_mask = grpci1_mask_irq,
.irq_unmask = grpci1_unmask_irq,
};
/* Handle one or multiple IRQs from the PCI core */
static void grpci1_pci_flow_irq(struct irq_desc *desc)
{
struct grpci1_priv *priv = grpci1priv;
int i, ack = 0;
unsigned int irqreg;
irqreg = REGLOAD(priv->regs->irq);
irqreg = (irqreg >> IRQ_MASK_BIT) & irqreg;
/* Error Interrupt? */
if (irqreg & IRQ_ALL_ERRORS) {
generic_handle_irq(priv->irq_err);
ack = 1;
}
/* PCI Interrupt? */
if (irqreg & IRQ_INTX) {
/* Call respective PCI Interrupt handler */
for (i = 0; i < 4; i++) {
if (irqreg & (1 << i))
generic_handle_irq(priv->irq_map[i]);
}
ack = 1;
}
/*
* Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ
* Controller, this must be done after IRQ sources have been handled to
* avoid double IRQ generation
*/
if (ack)
desc->irq_data.chip->irq_eoi(&desc->irq_data);
}
/* Create a virtual IRQ */
static unsigned int grpci1_build_device_irq(unsigned int irq)
{
unsigned int virq = 0, pil;
pil = 1 << 8;
virq = irq_alloc(irq, pil);
if (virq == 0)
goto out;
irq_set_chip_and_handler_name(virq, &grpci1_irq, handle_simple_irq,
"pcilvl");
irq_set_chip_data(virq, (void *)irq);
out:
return virq;
}
/*
* Initialize mappings AMBA<->PCI, clear IRQ state, setup PCI interface
*
* Target BARs:
* BAR0: unused in this implementation
* BAR1: peripheral DMA to host's memory (size at least 256MByte)
* BAR2..BAR5: not implemented in hardware
*/
static void grpci1_hw_init(struct grpci1_priv *priv)
{
u32 ahbadr, bar_sz, data, pciadr;
struct grpci1_regs __iomem *regs = priv->regs;
/* set 1:1 mapping between AHB -> PCI memory space */
REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000);
/* map PCI accesses to target BAR1 to Linux kernel memory 1:1 */
ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN((unsigned long) &_end));
REGSTORE(regs->page1, ahbadr);
/* translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
REGSTORE(regs->iomap, REGLOAD(regs->iomap) & 0x0000ffff);
/* disable and clear pending interrupts */
REGSTORE(regs->irq, 0);
	/* Set up BAR0 outside the access range so that it does not conflict with
* peripheral DMA. There is no need to set up the PAGE0 register.
*/
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
grpci1_cfg_r32(priv, TGT, 0, PCI_BASE_ADDRESS_0, &bar_sz);
bar_sz = ~bar_sz + 1;
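	/* Standard PCI BAR size probe: write all-ones, read back the
	 * size mask, negate and add one.  For example (illustrative), a
	 * read-back of 0xf0000000 yields bar_sz = 0x10000000, a 256MB
	 * BAR.
	 */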
pciadr = priv->pci_area - bar_sz;
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, pciadr);
/*
* Setup the Host's PCI Target BAR1 for other peripherals to access,
* and do DMA to the host's memory.
*/
grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_1, ahbadr);
/*
* Setup Latency Timer and cache line size. Default cache line
	 * size will result in poor performance (256-word fetches); 0xff
* will set it according to the max size of the PCI FIFO.
*/
grpci1_cfg_w8(priv, TGT, 0, PCI_CACHE_LINE_SIZE, 0xff);
grpci1_cfg_w8(priv, TGT, 0, PCI_LATENCY_TIMER, 0x40);
/* set as bus master, enable pci memory responses, clear status bits */
grpci1_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
}
static irqreturn_t grpci1_jump_interrupt(int irq, void *arg)
{
struct grpci1_priv *priv = arg;
dev_err(priv->dev, "Jump IRQ happened\n");
return IRQ_NONE;
}
/* Handle GRPCI1 Error Interrupt */
static irqreturn_t grpci1_err_interrupt(int irq, void *arg)
{
struct grpci1_priv *priv = arg;
u32 status;
grpci1_cfg_r16(priv, TGT, 0, PCI_STATUS, &status);
status &= priv->pci_err_mask;
if (status == 0)
return IRQ_NONE;
if (status & PCI_STATUS_PARITY)
dev_err(priv->dev, "Data Parity Error\n");
if (status & PCI_STATUS_SIG_TARGET_ABORT)
dev_err(priv->dev, "Signalled Target Abort\n");
if (status & PCI_STATUS_REC_TARGET_ABORT)
dev_err(priv->dev, "Received Target Abort\n");
if (status & PCI_STATUS_REC_MASTER_ABORT)
dev_err(priv->dev, "Received Master Abort\n");
if (status & PCI_STATUS_SIG_SYSTEM_ERROR)
dev_err(priv->dev, "Signalled System Error\n");
if (status & PCI_STATUS_DETECTED_PARITY)
dev_err(priv->dev, "Parity Error\n");
/* Clear handled INT TYPE IRQs */
grpci1_cfg_w16(priv, TGT, 0, PCI_STATUS, status);
return IRQ_HANDLED;
}
static int grpci1_of_probe(struct platform_device *ofdev)
{
struct grpci1_regs __iomem *regs;
struct grpci1_priv *priv;
int err, len;
const int *tmp;
u32 cfg, size, err_mask;
struct resource *res;
if (grpci1priv) {
dev_err(&ofdev->dev, "only one GRPCI1 supported\n");
return -ENODEV;
}
if (ofdev->num_resources < 3) {
dev_err(&ofdev->dev, "not enough APB/AHB resources\n");
return -EIO;
}
priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&ofdev->dev, "memory allocation failed\n");
return -ENOMEM;
}
platform_set_drvdata(ofdev, priv);
priv->dev = &ofdev->dev;
/* find device register base address */
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&ofdev->dev, res);
if (IS_ERR(regs))
return PTR_ERR(regs);
/*
* check that we're in Host Slot and that we can act as a Host Bridge
* and not only as target/peripheral.
*/
cfg = REGLOAD(regs->cfg_stat);
if ((cfg & CFGSTAT_HOST) == 0) {
dev_err(&ofdev->dev, "not in host system slot\n");
return -EIO;
}
	/* check that BAR1 supports 256 MByte so that we can map kernel space */
REGSTORE(regs->page1, 0xffffffff);
size = ~REGLOAD(regs->page1) + 1;
if (size < 0x10000000) {
dev_err(&ofdev->dev, "BAR1 must be at least 256MByte\n");
return -EIO;
}
/* hardware must support little-endian PCI (byte-twisting) */
if ((REGLOAD(regs->page0) & PAGE0_BTEN) == 0) {
dev_err(&ofdev->dev, "byte-twisting is required\n");
return -EIO;
}
priv->regs = regs;
priv->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
dev_info(&ofdev->dev, "host found at 0x%p, irq%d\n", regs, priv->irq);
/* Find PCI Memory, I/O and Configuration Space Windows */
priv->pci_area = ofdev->resource[1].start;
priv->pci_area_end = ofdev->resource[1].end+1;
priv->pci_io = ofdev->resource[2].start;
priv->pci_conf = ofdev->resource[2].start + 0x10000;
priv->pci_conf_end = priv->pci_conf + 0x10000;
priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
if (!priv->pci_io_va) {
dev_err(&ofdev->dev, "unable to map PCI I/O area\n");
return -EIO;
}
printk(KERN_INFO
"GRPCI1: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
" I/O SPACE [0x%08lx - 0x%08lx]\n"
" CONFIG SPACE [0x%08lx - 0x%08lx]\n",
priv->pci_area, priv->pci_area_end-1,
priv->pci_io, priv->pci_conf-1,
priv->pci_conf, priv->pci_conf_end-1);
/*
	 * I/O space resources in the I/O window are mapped into the virtual
	 * address space. We never use the low 4KB because some devices seem
	 * to have problems using address 0.
*/
priv->info.io_space.name = "GRPCI1 PCI I/O Space";
priv->info.io_space.start = priv->pci_io_va + 0x1000;
priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
priv->info.io_space.flags = IORESOURCE_IO;
/*
	 * grpci1 has no prefetchable memory; map everything as
	 * non-prefetchable memory.
*/
priv->info.mem_space.name = "GRPCI1 PCI MEM Space";
priv->info.mem_space.start = priv->pci_area;
priv->info.mem_space.end = priv->pci_area_end - 1;
priv->info.mem_space.flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) {
dev_err(&ofdev->dev, "unable to request PCI memory area\n");
err = -ENOMEM;
goto err1;
}
if (request_resource(&ioport_resource, &priv->info.io_space) < 0) {
dev_err(&ofdev->dev, "unable to request PCI I/O area\n");
err = -ENOMEM;
goto err2;
}
/* setup maximum supported PCI buses */
priv->info.busn.name = "GRPCI1 busn";
priv->info.busn.start = 0;
priv->info.busn.end = 15;
grpci1priv = priv;
/* Initialize hardware */
grpci1_hw_init(priv);
/*
* Get PCI Interrupt to System IRQ mapping and setup IRQ handling
* Error IRQ. All PCI and PCI-Error interrupts are shared using the
* same system IRQ.
*/
leon_update_virq_handling(priv->irq, grpci1_pci_flow_irq, "pcilvl", 0);
priv->irq_map[0] = grpci1_build_device_irq(1);
priv->irq_map[1] = grpci1_build_device_irq(2);
priv->irq_map[2] = grpci1_build_device_irq(3);
priv->irq_map[3] = grpci1_build_device_irq(4);
priv->irq_err = grpci1_build_device_irq(5);
printk(KERN_INFO " PCI INTA..D#: IRQ%d, IRQ%d, IRQ%d, IRQ%d\n",
priv->irq_map[0], priv->irq_map[1], priv->irq_map[2],
priv->irq_map[3]);
/* Enable IRQs on LEON IRQ controller */
err = devm_request_irq(&ofdev->dev, priv->irq, grpci1_jump_interrupt, 0,
"GRPCI1_JUMP", priv);
if (err) {
dev_err(&ofdev->dev, "ERR IRQ request failed: %d\n", err);
goto err3;
}
/* Setup IRQ handler for access errors */
err = devm_request_irq(&ofdev->dev, priv->irq_err,
grpci1_err_interrupt, IRQF_SHARED, "GRPCI1_ERR",
priv);
if (err) {
dev_err(&ofdev->dev, "ERR VIRQ request failed: %d\n", err);
goto err3;
}
tmp = of_get_property(ofdev->dev.of_node, "all_pci_errors", &len);
if (tmp && (len == 4)) {
priv->pci_err_mask = ALL_PCI_ERRORS;
err_mask = IRQ_ALL_ERRORS << IRQ_MASK_BIT;
} else {
priv->pci_err_mask = DEF_PCI_ERRORS;
err_mask = IRQ_DEF_ERRORS << IRQ_MASK_BIT;
}
/*
* Enable Error Interrupts. PCI interrupts are unmasked once request_irq
	 * is called by the PCI device drivers.
*/
REGSTORE(regs->irq, err_mask);
/* Init common layer and scan buses */
priv->info.ops = &grpci1_ops;
priv->info.map_irq = grpci1_map_irq;
leon_pci_init(ofdev, &priv->info);
return 0;
err3:
release_resource(&priv->info.io_space);
err2:
release_resource(&priv->info.mem_space);
err1:
iounmap((void __iomem *)priv->pci_io_va);
grpci1priv = NULL;
return err;
}
static const struct of_device_id grpci1_of_match[] __initconst = {
{
.name = "GAISLER_PCIFBRG",
},
{
.name = "01_014",
},
{},
};
static struct platform_driver grpci1_of_driver = {
.driver = {
.name = "grpci1",
.of_match_table = grpci1_of_match,
},
.probe = grpci1_of_probe,
};
static int __init grpci1_init(void)
{
return platform_driver_register(&grpci1_of_driver);
}
subsys_initcall(grpci1_init);
| linux-master | arch/sparc/kernel/leon_pci_grpci1.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Pseudo NMI support on sparc64 systems.
*
* Copyright (C) 2009 David S. Miller <[email protected]>
*
* The NMI watchdog support and infrastructure is based almost
* entirely upon the x86 NMI support code.
*/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/kernel_stat.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pcr.h>
#include "kstack.h"
/* We don't have a real NMI on sparc64, but we can fake one
* up using profiling counter overflow interrupts and interrupt
* levels.
*
* The profile overflow interrupts at level 15, so we use
* level 14 as our IRQ off level.
*/
static int panic_on_timeout;
/* nmi_active:
* >0: the NMI watchdog is active, but can be disabled
* <0: the NMI watchdog has not been set up, and cannot be enabled
* 0: the NMI watchdog is disabled, but can be enabled
*/
atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
static int nmi_init_done;
static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void arch_touch_nmi_watchdog(void)
{
if (atomic_read(&nmi_active)) {
int cpu;
for_each_present_cpu(cpu) {
if (per_cpu(nmi_touch, cpu) != 1)
per_cpu(nmi_touch, cpu) = 1;
}
}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
int __init watchdog_hardlockup_probe(void)
{
return 0;
}
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
int this_cpu = smp_processor_id();
if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
return;
if (do_panic || panic_on_oops)
panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
else
WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
}
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
unsigned int sum, touched = 0;
void *orig_sp;
clear_softint(1 << irq);
local_cpu_data().__nmi_count++;
nmi_enter();
orig_sp = set_hardirq_stack();
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
else
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
sum = local_cpu_data().irq0_irqs;
if (__this_cpu_read(nmi_touch)) {
__this_cpu_write(nmi_touch, 0);
touched = 1;
}
if (!touched && __this_cpu_read(last_irq_sum) == sum) {
__this_cpu_inc(alert_counter);
if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__this_cpu_write(last_irq_sum, sum);
__this_cpu_write(alert_counter, 0);
}
if (__this_cpu_read(wd_enabled)) {
pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}
restore_hardirq_stack(orig_sp);
nmi_exit();
}
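/* Illustrative timing note: once check_nmi_watchdog() has lowered
 * nmi_hz to 1, the profile counter fires roughly once per second, so
 * the alert_counter threshold of 30 * nmi_hz above corresponds to
 * about 30 seconds without observed timer-IRQ progress before
 * die_nmi() is invoked.
 */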
static inline unsigned int get_nmi_count(int cpu)
{
return cpu_data(cpu).__nmi_count;
}
static __init void nmi_cpu_busy(void *data)
{
while (endflag == 0)
mb();
}
static void report_broken_nmi(int cpu, int *prev_nmi_count)
{
printk(KERN_CONT "\n");
printk(KERN_WARNING
"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
printk(KERN_WARNING
"Please report this to bugzilla.kernel.org,\n");
printk(KERN_WARNING
"and attach the output of the 'dmesg' command.\n");
per_cpu(wd_enabled, cpu) = 0;
atomic_dec(&nmi_active);
}
void stop_nmi_watchdog(void *unused)
{
if (!__this_cpu_read(wd_enabled))
return;
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
__this_cpu_write(wd_enabled, 0);
atomic_dec(&nmi_active);
}
static int __init check_nmi_watchdog(void)
{
unsigned int *prev_nmi_count;
int cpu, err;
if (!atomic_read(&nmi_active))
return 0;
prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int),
GFP_KERNEL);
if (!prev_nmi_count) {
err = -ENOMEM;
goto error;
}
printk(KERN_INFO "Testing NMI watchdog ... ");
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
for_each_possible_cpu(cpu)
prev_nmi_count[cpu] = get_nmi_count(cpu);
local_irq_enable();
mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
for_each_online_cpu(cpu) {
if (!per_cpu(wd_enabled, cpu))
continue;
if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
report_broken_nmi(cpu, prev_nmi_count);
}
endflag = 1;
if (!atomic_read(&nmi_active)) {
kfree(prev_nmi_count);
atomic_set(&nmi_active, -1);
err = -ENODEV;
goto error;
}
printk("OK.\n");
nmi_hz = 1;
kfree(prev_nmi_count);
return 0;
error:
on_each_cpu(stop_nmi_watchdog, NULL, 1);
return err;
}
void start_nmi_watchdog(void *unused)
{
if (__this_cpu_read(wd_enabled))
return;
__this_cpu_write(wd_enabled, 1);
atomic_inc(&nmi_active);
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}
static void nmi_adjust_hz_one(void *unused)
{
if (!__this_cpu_read(wd_enabled))
return;
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}
void nmi_adjust_hz(unsigned int new_hz)
{
nmi_hz = new_hz;
on_each_cpu(nmi_adjust_hz_one, NULL, 1);
}
EXPORT_SYMBOL_GPL(nmi_adjust_hz);
static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
{
on_each_cpu(stop_nmi_watchdog, NULL, 1);
return 0;
}
static struct notifier_block nmi_reboot_notifier = {
.notifier_call = nmi_shutdown,
};
int __init nmi_init(void)
{
int err;
on_each_cpu(start_nmi_watchdog, NULL, 1);
err = check_nmi_watchdog();
if (!err) {
err = register_reboot_notifier(&nmi_reboot_notifier);
if (err) {
on_each_cpu(stop_nmi_watchdog, NULL, 1);
atomic_set(&nmi_active, -1);
}
}
nmi_init_done = 1;
return err;
}
static int __init setup_nmi_watchdog(char *str)
{
if (!strncmp(str, "panic", 5))
panic_on_timeout = 1;
return 0;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
/*
* sparc specific NMI watchdog enable function.
 * Enables the watchdog if it is not already enabled.
*/
void watchdog_hardlockup_enable(unsigned int cpu)
{
if (atomic_read(&nmi_active) == -1) {
pr_warn("NMI watchdog cannot be enabled or disabled\n");
return;
}
/*
	 * The watchdog thread could start even before nmi_init is called.
	 * Just return in that case and let nmi_init finish the init
	 * process first.
*/
if (!nmi_init_done)
return;
smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
}
/*
* sparc specific NMI watchdog disable function.
 * Disables the watchdog if it is not already disabled.
*/
void watchdog_hardlockup_disable(unsigned int cpu)
{
if (atomic_read(&nmi_active) == -1)
pr_warn_once("NMI watchdog cannot be enabled or disabled\n");
else
smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
}
| linux-master | arch/sparc/kernel/nmi.c |
// SPDX-License-Identifier: GPL-2.0
/* pci_sabre.c: Sabre specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999, 2007 David S. Miller ([email protected])
* Copyright (C) 1998, 1999 Eddie C. Dost ([email protected])
* Copyright (C) 1999 Jakub Jelinek ([email protected])
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <asm/apb.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "psycho_common.h"
#define DRIVER_NAME "sabre"
#define PFX DRIVER_NAME ": "
/* SABRE PCI controller register offsets and definitions. */
#define SABRE_UE_AFSR 0x0030UL
#define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
#define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
#define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
#define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
#define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */
#define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */
#define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
#define SABRE_UEAFSR_OFF	0x00000000e0000000UL	/* Offset (AFAR bits [5:3]) */
#define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */
#define SABRE_UECE_AFAR 0x0038UL
#define SABRE_CE_AFSR 0x0040UL
#define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
#define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
#define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
#define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
#define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */
#define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
#define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */
#define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */
#define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */
#define SABRE_IOMMU_CONTROL 0x0200UL
#define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */
#define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */
#define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */
#define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */
#define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
#define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000
#define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000
#define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000
#define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000
#define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000
#define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000
#define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000
#define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
#define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */
#define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
#define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
#define SABRE_IOMMU_TSBBASE 0x0208UL
#define SABRE_IOMMU_FLUSH 0x0210UL
#define SABRE_IMAP_A_SLOT0 0x0c00UL
#define SABRE_IMAP_B_SLOT0 0x0c20UL
#define SABRE_IMAP_SCSI 0x1000UL
#define SABRE_IMAP_ETH 0x1008UL
#define SABRE_IMAP_BPP 0x1010UL
#define SABRE_IMAP_AU_REC 0x1018UL
#define SABRE_IMAP_AU_PLAY 0x1020UL
#define SABRE_IMAP_PFAIL 0x1028UL
#define SABRE_IMAP_KMS 0x1030UL
#define SABRE_IMAP_FLPY 0x1038UL
#define SABRE_IMAP_SHW 0x1040UL
#define SABRE_IMAP_KBD 0x1048UL
#define SABRE_IMAP_MS 0x1050UL
#define SABRE_IMAP_SER 0x1058UL
#define SABRE_IMAP_UE 0x1070UL
#define SABRE_IMAP_CE 0x1078UL
#define SABRE_IMAP_PCIERR 0x1080UL
#define SABRE_IMAP_GFX 0x1098UL
#define SABRE_IMAP_EUPA 0x10a0UL
#define SABRE_ICLR_A_SLOT0 0x1400UL
#define SABRE_ICLR_B_SLOT0 0x1480UL
#define SABRE_ICLR_SCSI 0x1800UL
#define SABRE_ICLR_ETH 0x1808UL
#define SABRE_ICLR_BPP 0x1810UL
#define SABRE_ICLR_AU_REC 0x1818UL
#define SABRE_ICLR_AU_PLAY 0x1820UL
#define SABRE_ICLR_PFAIL 0x1828UL
#define SABRE_ICLR_KMS 0x1830UL
#define SABRE_ICLR_FLPY 0x1838UL
#define SABRE_ICLR_SHW 0x1840UL
#define SABRE_ICLR_KBD 0x1848UL
#define SABRE_ICLR_MS 0x1850UL
#define SABRE_ICLR_SER 0x1858UL
#define SABRE_ICLR_UE 0x1870UL
#define SABRE_ICLR_CE 0x1878UL
#define SABRE_ICLR_PCIERR 0x1880UL
#define SABRE_WRSYNC 0x1c20UL
#define SABRE_PCICTRL 0x2000UL
#define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */
#define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */
#define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */
#define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */
#define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */
#define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */
#define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */
#define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */
#define SABRE_PIOAFSR 0x2010UL
#define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */
#define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */
#define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
#define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
#define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */
#define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */
#define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
#define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
#define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */
#define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */
#define SABRE_PIOAFAR 0x2018UL
#define SABRE_PCIDIAG 0x2020UL
#define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */
#define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */
#define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */
#define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */
#define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */
#define SABRE_PCITASR 0x2028UL
#define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */
#define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */
#define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */
#define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */
#define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */
#define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */
#define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */
#define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */
#define SABRE_PIOBUF_DIAG 0x5000UL
#define SABRE_DMABUF_DIAGLO 0x5100UL
#define SABRE_DMABUF_DIAGHI 0x51c0UL
#define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */
#define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */
#define SABRE_IOMMU_VADIAG 0xa400UL
#define SABRE_IOMMU_TCDIAG 0xa408UL
#define SABRE_IOMMU_TAG 0xa580UL
#define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */
#define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */
#define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */
#define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */
#define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */
#define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */
#define SABRE_IOMMU_DATA 0xa600UL
#define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */
#define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */
#define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */
#define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */
#define SABRE_PCI_IRQSTATE 0xa800UL
#define SABRE_OBIO_IRQSTATE 0xa808UL
#define SABRE_FFBCFG 0xf000UL
#define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */
#define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */
#define SABRE_MCCTRL0 0xf010UL
#define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */
#define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */
#define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */
#define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */
#define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */
#define SABRE_MCCTRL1 0xf018UL
#define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */
#define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */
#define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */
#define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */
#define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */
#define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */
#define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */
#define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */
#define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */
#define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */
#define SABRE_RESETCTRL 0xf020UL
#define SABRE_CONFIGSPACE 0x001000000UL
#define SABRE_IOSPACE 0x002000000UL
#define SABRE_IOSPACE_SIZE 0x000ffffffUL
#define SABRE_MEMSPACE 0x100000000UL
#define SABRE_MEMSPACE_SIZE 0x07fffffffUL
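/* Address-map note (derived from the constants above, for orientation
 * only): the controller register block sits at the base of the SABRE
 * register space, PCI config space at base + 0x001000000, PCI I/O
 * space at base + 0x002000000 (16MB), and PCI memory space at
 * base + 0x100000000 (2GB).
 */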
static int hummingbird_p;
static struct pci_bus *sabre_root_bus;
static irqreturn_t sabre_ue_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SABRE_UE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch uncorrectable error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear the primary/secondary error status bits. */
error_bits = afsr &
(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Uncorrectable Error, primary error type[%s%s]\n",
pbm->name,
((error_bits & SABRE_UEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SABRE_UEAFSR_PDWR) ?
"DMA Write" : "???")),
((error_bits & SABRE_UEAFSR_PDTE) ?
":Translation Error" : ""));
printk("%s: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
pbm->name,
(afsr & SABRE_UEAFSR_BMSK) >> 32UL,
(afsr & SABRE_UEAFSR_OFF) >> 29UL,
((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: UE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SABRE_UEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & SABRE_UEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (afsr & SABRE_UEAFSR_SDTE) {
reported++;
printk("(Translation Error)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* Interrogate IOMMU for error status. */
psycho_check_iommu_error(pbm, afsr, afar, UE_ERR);
return IRQ_HANDLED;
}
static irqreturn_t sabre_ce_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SABRE_CE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Correctable Error, primary error type[%s]\n",
pbm->name,
((error_bits & SABRE_CEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SABRE_CEAFSR_PDWR) ?
"DMA Write" : "???")));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
"was_block(%d)\n",
pbm->name,
(afsr & SABRE_CEAFSR_ESYND) >> 48UL,
(afsr & SABRE_CEAFSR_BMSK) >> 32UL,
(afsr & SABRE_CEAFSR_OFF) >> 29UL,
((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
printk("%s: CE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: CE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SABRE_CEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & SABRE_CEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
static void sabre_register_error_handlers(struct pci_pbm_info *pbm)
{
struct device_node *dp = pbm->op->dev.of_node;
struct platform_device *op;
unsigned long base = pbm->controller_regs;
u64 tmp;
int err;
if (pbm->chip_type == PBM_CHIP_TYPE_SABRE)
dp = dp->parent;
op = of_find_device_by_node(dp);
if (!op)
return;
/* Sabre/Hummingbird IRQ property layout is:
* 0: PCI ERR
* 1: UE ERR
* 2: CE ERR
* 3: POWER FAIL
*/
if (op->archdata.num_irqs < 4)
return;
/* We clear the error bits in the appropriate AFSR before
* registering the handler so that we don't get spurious
* interrupts.
*/
upa_writeq((SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE),
base + SABRE_UE_AFSR);
err = request_irq(op->archdata.irqs[1], sabre_ue_intr, 0, "SABRE_UE", pbm);
if (err)
printk(KERN_WARNING "%s: Couldn't register UE, err=%d.\n",
pbm->name, err);
upa_writeq((SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR),
base + SABRE_CE_AFSR);
err = request_irq(op->archdata.irqs[2], sabre_ce_intr, 0, "SABRE_CE", pbm);
if (err)
printk(KERN_WARNING "%s: Couldn't register CE, err=%d.\n",
pbm->name, err);
err = request_irq(op->archdata.irqs[0], psycho_pcierr_intr, 0,
"SABRE_PCIERR", pbm);
if (err)
printk(KERN_WARNING "%s: Couldn't register PCIERR, err=%d.\n",
pbm->name, err);
tmp = upa_readq(base + SABRE_PCICTRL);
tmp |= SABRE_PCICTRL_ERREN;
upa_writeq(tmp, base + SABRE_PCICTRL);
}
static void apb_init(struct pci_bus *sabre_bus)
{
struct pci_dev *pdev;
list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
if (pdev->vendor == PCI_VENDOR_ID_SUN &&
pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
u16 word16;
pci_read_config_word(pdev, PCI_COMMAND, &word16);
word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
PCI_COMMAND_IO;
pci_write_config_word(pdev, PCI_COMMAND, word16);
/* Status register bits are "write 1 to clear". */
pci_write_config_word(pdev, PCI_STATUS, 0xffff);
pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff);
/* Use a primary/secondary latency timer value
* of 64.
*/
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64);
/* Enable reporting/forwarding of master aborts,
* parity, and SERR.
*/
pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL,
(PCI_BRIDGE_CTL_PARITY |
PCI_BRIDGE_CTL_SERR |
PCI_BRIDGE_CTL_MASTER_ABORT));
}
}
}
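/* Note on the "write 1 to clear" stores above (an illustrative sketch,
 * not part of the driver flow): writing a latched status word back to
 * itself clears exactly the bits that were set, e.g.
 *
 * u16 status;
 * pci_read_config_word(pdev, PCI_STATUS, &status);
 * pci_write_config_word(pdev, PCI_STATUS, status);
 *
 * Writing 0xffff, as apb_init() does, simply clears every W1C bit
 * without needing the read.
 */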
static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
static int once;
/* The APB bridge speaks to the Sabre host PCI bridge
* at 66MHz, but the front side of APB runs at 33MHz
* for both segments.
*
* Hummingbird systems do not use APB, so they run
* at 66MHz.
*/
if (hummingbird_p)
pbm->is_66mhz_capable = 1;
else
pbm->is_66mhz_capable = 0;
/* This driver has not been verified to handle
* multiple SABREs yet, so trap this.
*
* Also note that the SABRE host bridge is hardwired
* to live at bus 0.
*/
if (once != 0) {
printk(KERN_ERR PFX "Multiple controllers unsupported.\n");
return;
}
once++;
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
if (!pbm->pci_bus)
return;
sabre_root_bus = pbm->pci_bus;
apb_init(pbm->pci_bus);
sabre_register_error_handlers(pbm);
}
static void sabre_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op)
{
psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE);
pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR;
pbm->pci_afar = pbm->controller_regs + SABRE_PIOAFAR;
pbm->pci_csr = pbm->controller_regs + SABRE_PCICTRL;
sabre_scan_bus(pbm, &op->dev);
}
static const struct of_device_id sabre_match[];
static int sabre_probe(struct platform_device *op)
{
const struct of_device_id *match;
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
u32 upa_portid, dma_mask;
struct iommu *iommu;
int tsbsize, err;
const u32 *vdma;
u64 clear_irq;
match = of_match_device(sabre_match, &op->dev);
hummingbird_p = match && (match->data != NULL);
if (!hummingbird_p) {
struct device_node *cpu_dp;
/* Of course, Sun has to encode things a thousand
* different ways, inconsistently.
*/
for_each_node_by_type(cpu_dp, "cpu") {
if (of_node_name_eq(cpu_dp, "SUNW,UltraSPARC-IIe"))
hummingbird_p = 1;
}
}
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
goto out_err;
}
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
goto out_free_controller;
}
pbm->iommu = iommu;
upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);
pbm->portid = upa_portid;
/*
* Map in SABRE register set and report the presence of this SABRE.
*/
pr_regs = of_get_property(dp, "reg", NULL);
err = -ENODEV;
if (!pr_regs) {
printk(KERN_ERR PFX "No reg property\n");
goto out_free_iommu;
}
/*
* First REG in property is base of entire SABRE register space.
*/
pbm->controller_regs = pr_regs[0].phys_addr;
/* Clear interrupts */
/* PCI first */
for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
upa_writeq(0x0UL, pbm->controller_regs + clear_irq);
/* Then OBIO */
for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
upa_writeq(0x0UL, pbm->controller_regs + clear_irq);
/* Error interrupts are enabled later after the bus scan. */
upa_writeq((SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR |
SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN),
pbm->controller_regs + SABRE_PCICTRL);
/* Now map in PCI config space for entire SABRE. */
pbm->config_space = pbm->controller_regs + SABRE_CONFIGSPACE;
vdma = of_get_property(dp, "virtual-dma", NULL);
if (!vdma) {
printk(KERN_ERR PFX "No virtual-dma property\n");
goto out_free_iommu;
}
dma_mask = vdma[0];
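/* vdma[0] is the virtual DMA base and vdma[1] the size; OR-ing
 * (size - 1) into the base below yields the inclusive DMA address
 * mask, e.g. a 0xc0000000 base with a 0x40000000 size gives
 * 0xffffffff.
 */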
switch(vdma[1]) {
case 0x20000000:
dma_mask |= 0x1fffffff;
tsbsize = 64;
break;
case 0x40000000:
dma_mask |= 0x3fffffff;
tsbsize = 128;
break;
case 0x80000000:
dma_mask |= 0x7fffffff;
tsbsize = 128;
break;
default:
printk(KERN_ERR PFX "Strange virtual-dma size.\n");
goto out_free_iommu;
}
err = psycho_iommu_init(pbm, tsbsize, vdma[0], dma_mask, SABRE_WRSYNC);
if (err)
goto out_free_iommu;
/*
* Look for APB underneath.
*/
sabre_pbm_init(pbm, op);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id sabre_match[] = {
{
.name = "pci",
.compatible = "pci108e,a001",
.data = (void *) 1,
},
{
.name = "pci",
.compatible = "pci108e,a000",
},
{},
};
static struct platform_driver sabre_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = sabre_match,
},
.probe = sabre_probe,
};
static int __init sabre_init(void)
{
return platform_driver_register(&sabre_driver);
}
subsys_initcall(sabre_init);
| linux-master | arch/sparc/kernel/pci_sabre.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Procedures for creating, accessing and interpreting the device tree.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc64 by David S. Miller [email protected]
*/
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/irq.h>
#include <asm/asi.h>
#include <asm/upa.h>
#include <asm/smp.h>
#include "prom.h"
void * __init prom_early_alloc(unsigned long size)
{
void *ret = memblock_alloc(size, SMP_CACHE_BYTES);
if (!ret) {
prom_printf("prom_early_alloc(%lu) failed\n", size);
prom_halt();
}
prom_early_allocated += size;
return ret;
}
/* The following routines deal with the black magic of fully naming a
* node.
*
* Certain well known named nodes are just the simple name string.
*
* Actual devices have an address specifier appended to the base name
* string, like this "foo@addr". The "addr" can be in any number of
* formats, and the platform plus the type of the node determine the
* format and how it is constructed.
*
* For children of the ROOT node, the naming convention is fixed and
* determined by whether this is a sun4u or sun4v system.
*
* For children of other nodes, it is bus type specific. So
* we walk up the tree until we discover a "device_type" property
* we recognize and we go from there.
*
* As an example, the boot device on my workstation has a full path:
*
* /pci@1e,600000/ide@d/disk@0,0:c
*/
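/* A sketch of how that example path decomposes (the per-bus helpers
 * below are the authoritative versions):
 *
 * pci@1e,600000 root child, sun4u style "name@portid,offset"
 * ide@d PCI child, "name@devnum" (function 0 omitted)
 * disk@0,0:c leaf device; ":c" is an OBP option suffix
 */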
static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *rprop;
u32 high_bits, low_bits, type;
rprop = of_find_property(dp, "reg", NULL);
if (!rprop)
return;
regs = rprop->value;
if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
}
type = regs->phys_addr >> 60UL;
high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL;
low_bits = (regs->phys_addr & 0xffffffffUL);
if (type == 0 || type == 8) {
const char *prefix = (type == 0) ? "m" : "i";
if (low_bits)
sprintf(tmp_buf, "%s@%s%x,%x",
name, prefix,
high_bits, low_bits);
else
sprintf(tmp_buf, "%s@%s%x",
name,
prefix,
high_bits);
} else if (type == 12) {
sprintf(tmp_buf, "%s@%x",
name, high_bits);
}
}
static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
}
prop = of_find_property(dp, "upa-portid", NULL);
if (!prop)
prop = of_find_property(dp, "portid", NULL);
if (prop) {
unsigned long mask = 0xffffffffUL;
if (tlb_type >= cheetah)
mask = 0x7fffff;
sprintf(tmp_buf, "%s@%x,%x",
name,
*(u32 *)prop->value,
(unsigned int) (regs->phys_addr & mask));
}
}
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
name,
regs->which_io,
regs->phys_addr);
}
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
name,
devfn >> 3);
}
}
/* "name@UPA_PORTID,offset" */
static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
prop = of_find_property(dp, "upa-portid", NULL);
if (!prop)
return;
sprintf(tmp_buf, "%s@%x,%x",
name,
*(u32 *) prop->value,
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
/* "name@reg" */
static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x", name, *regs);
}
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
/* "name@bus,addr" */
static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
/* This actually isn't right... should look at the #address-cells
* property of the i2c bus node etc. etc.
*/
sprintf(tmp_buf, "%s@%x,%x",
name, regs[0], regs[1]);
}
/* "name@reg0[,reg1]" */
static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
if (prop->length == sizeof(u32) || regs[1] == 1) {
sprintf(tmp_buf, "%s@%x",
name, regs[0]);
} else {
sprintf(tmp_buf, "%s@%x,%x",
name, regs[0], regs[1]);
}
}
/* "name@reg0reg1[,reg2reg3]" */
static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
{
const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
if (regs[2] || regs[3]) {
sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
name, regs[0], regs[1], regs[2], regs[3]);
} else {
sprintf(tmp_buf, "%s@%08x%08x",
name, regs[0], regs[1]);
}
}
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
{
struct device_node *parent = dp->parent;
if (parent != NULL) {
if (of_node_is_type(parent, "pci") ||
of_node_is_type(parent, "pciex")) {
pci_path_component(dp, tmp_buf);
return;
}
if (of_node_is_type(parent, "sbus")) {
sbus_path_component(dp, tmp_buf);
return;
}
if (of_node_is_type(parent, "upa")) {
upa_path_component(dp, tmp_buf);
return;
}
if (of_node_is_type(parent, "ebus")) {
ebus_path_component(dp, tmp_buf);
return;
}
if (of_node_name_eq(parent, "usb") ||
of_node_name_eq(parent, "hub")) {
usb_path_component(dp, tmp_buf);
return;
}
if (of_node_is_type(parent, "i2c")) {
i2c_path_component(dp, tmp_buf);
return;
}
if (of_node_is_type(parent, "firewire")) {
ieee1394_path_component(dp, tmp_buf);
return;
}
if (of_node_is_type(parent, "virtual-devices")) {
vdev_path_component(dp, tmp_buf);
return;
}
/* "isa" is handled with platform naming */
}
/* Use platform naming convention. */
if (tlb_type == hypervisor) {
sun4v_path_component(dp, tmp_buf);
return;
} else {
sun4u_path_component(dp, tmp_buf);
}
}
char * __init build_path_component(struct device_node *dp)
{
const char *name = of_get_property(dp, "name", NULL);
char tmp_buf[64], *n;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
strcpy(tmp_buf, name);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
return n;
}
static const char *get_mid_prop(void)
{
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
int cpu, unsigned int *thread)
{
const char *mid_prop = get_mid_prop();
int this_cpu_id;
/* On hypervisor based platforms we interrogate the 'reg'
* property. On everything else we look for a 'upa-portid',
* 'portid', or 'cpuid' property.
*/
if (tlb_type == hypervisor) {
struct property *prop = of_find_property(cpun, "reg", NULL);
u32 *regs;
if (!prop) {
pr_warn("CPU node missing reg property\n");
return false;
}
regs = prop->value;
this_cpu_id = regs[0] & 0x0fffffff;
} else {
this_cpu_id = of_getintprop_default(cpun, mid_prop, -1);
if (this_cpu_id < 0) {
mid_prop = "cpuid";
this_cpu_id = of_getintprop_default(cpun, mid_prop, -1);
}
if (this_cpu_id < 0) {
pr_warn("CPU node missing cpu ID property\n");
return false;
}
}
if (this_cpu_id == cpu) {
if (thread) {
int proc_id = cpu_data(cpu).proc_id;
/* On sparc64, the cpu thread information is obtained
* either from OBP or the machine description. We've
* actually probed this information already long before
* this interface gets called so instead of interrogating
* both the OF node and the MDESC again, just use what
* we discovered already.
*/
if (proc_id < 0)
proc_id = 0;
*thread = proc_id;
}
return true;
}
return false;
}
static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
{
struct device_node *dp;
const char *mid_prop;
mid_prop = get_mid_prop();
for_each_node_by_type(dp, "cpu") {
int cpuid = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
void *ret;
if (cpuid < 0) {
this_mid_prop = "cpuid";
cpuid = of_getintprop_default(dp, this_mid_prop, -1);
}
if (cpuid < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
printk(KERN_WARNING "Ignoring CPU %d which is "
">= NR_CPUS (%d)\n",
cpuid, NR_CPUS);
continue;
}
#endif
ret = func(dp, cpuid, arg);
if (ret)
return ret;
}
return NULL;
}
static void *check_cpu_node(struct device_node *dp, int cpuid, int id)
{
if (id == cpuid)
return dp;
return NULL;
}
struct device_node *of_find_node_by_cpuid(int cpuid)
{
return of_iterate_over_cpus(check_cpu_node, cpuid);
}
static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
{
ncpus_probed++;
#ifdef CONFIG_SMP
set_cpu_present(cpuid, true);
set_cpu_possible(cpuid, true);
#endif
return NULL;
}
void __init of_populate_present_mask(void)
{
if (tlb_type == hypervisor)
return;
ncpus_probed = 0;
of_iterate_over_cpus(record_one_cpu, 0);
}
static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
{
struct device_node *portid_parent = NULL;
int portid = -1;
if (of_property_present(dp, "cpuid")) {
int limit = 2;
portid_parent = dp;
while (limit--) {
portid_parent = portid_parent->parent;
if (!portid_parent)
break;
portid = of_getintprop_default(portid_parent,
"portid", -1);
if (portid >= 0)
break;
}
}
#ifndef CONFIG_SMP
/* On uniprocessor we only want the values for the
* real physical cpu the kernel booted onto; however,
* cpu_data() only has one entry, at index 0.
*/
if (cpuid != real_hard_smp_processor_id())
return NULL;
cpuid = 0;
#endif
cpu_data(cpuid).clock_tick =
of_getintprop_default(dp, "clock-frequency", 0);
if (portid_parent) {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "l1-dcache-size",
16 * 1024);
cpu_data(cpuid).dcache_line_size =
of_getintprop_default(dp, "l1-dcache-line-size",
32);
cpu_data(cpuid).icache_size =
of_getintprop_default(dp, "l1-icache-size",
8 * 1024);
cpu_data(cpuid).icache_line_size =
of_getintprop_default(dp, "l1-icache-line-size",
32);
cpu_data(cpuid).ecache_size =
of_getintprop_default(dp, "l2-cache-size", 0);
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(dp, "l2-cache-line-size", 0);
if (!cpu_data(cpuid).ecache_size ||
!cpu_data(cpuid).ecache_line_size) {
cpu_data(cpuid).ecache_size =
of_getintprop_default(portid_parent,
"l2-cache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(portid_parent,
"l2-cache-line-size", 64);
}
cpu_data(cpuid).core_id = portid + 1;
cpu_data(cpuid).proc_id = portid;
} else {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "dcache-size", 16 * 1024);
cpu_data(cpuid).dcache_line_size =
of_getintprop_default(dp, "dcache-line-size", 32);
cpu_data(cpuid).icache_size =
of_getintprop_default(dp, "icache-size", 16 * 1024);
cpu_data(cpuid).icache_line_size =
of_getintprop_default(dp, "icache-line-size", 32);
cpu_data(cpuid).ecache_size =
of_getintprop_default(dp, "ecache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(dp, "ecache-line-size", 64);
cpu_data(cpuid).core_id = 0;
cpu_data(cpuid).proc_id = -1;
}
return NULL;
}
void __init of_fill_in_cpu_data(void)
{
if (tlb_type == hypervisor)
return;
of_iterate_over_cpus(fill_in_one_cpu, 0);
smp_fill_in_sib_core_maps();
}
void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
struct device_node *dp;
phandle node;
of_console_path = prom_early_alloc(256);
if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) {
prom_printf("Cannot obtain path of stdout.\n");
prom_halt();
}
of_console_options = strrchr(of_console_path, ':');
if (of_console_options) {
of_console_options++;
if (*of_console_options == '\0')
of_console_options = NULL;
}
node = prom_inst2pkg(prom_stdout);
if (!node) {
prom_printf("Cannot resolve stdout node from "
"instance %08x.\n", prom_stdout);
prom_halt();
}
dp = of_find_node_by_phandle(node);
if (!of_node_is_type(dp, "display") && !of_node_is_type(dp, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
}
of_console_device = dp;
printk(msg, of_console_path);
}
| linux-master | arch/sparc/kernel/prom_64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* idprom.c: Routines to load the idprom into kernel addresses and
* interpret the data contained within.
*
* Copyright (C) 1995 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <asm/oplib.h>
#include <asm/idprom.h>
struct idprom *idprom;
EXPORT_SYMBOL(idprom);
static struct idprom idprom_buffer;
#ifdef CONFIG_SPARC32
#include <asm/machines.h> /* Fun with Sun released architectures. */
/* Here is the master table of Sun machines which use some implementation
* of the Sparc CPU and have a meaningful IDPROM machtype value that we
* know about. See asm-sparc/machines.h for empirical constants.
*/
static struct Sun_Machine_Models Sun_Machines[] = {
/* First, Leon */
{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) },
/* Finally, early Sun4m's */
{ .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) },
{ .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) },
{ .name = "Sun4m SparcStation5", .id_machtype = (SM_SUN4M | SM_4M_SS40) },
/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
{ .name = "Sun4M OBP based system", .id_machtype = (SM_SUN4M_OBP | 0x0) } };
static void __init display_system_type(unsigned char machtype)
{
char sysname[128];
register int i;
for (i = 0; i < ARRAY_SIZE(Sun_Machines); i++) {
if (Sun_Machines[i].id_machtype == machtype) {
if (machtype != (SM_SUN4M_OBP | 0x00) ||
prom_getproperty(prom_root_node, "banner-name",
sysname, sizeof(sysname)) <= 0)
printk(KERN_WARNING "TYPE: %s\n",
Sun_Machines[i].name);
else
printk(KERN_WARNING "TYPE: %s\n", sysname);
return;
}
}
prom_printf("IDPROM: Warning, bogus id_machtype value, 0x%x\n", machtype);
}
#else
static void __init display_system_type(unsigned char machtype)
{
}
#endif
unsigned char *arch_get_platform_mac_address(void)
{
return idprom->id_ethaddr;
}
/* Calculate the IDPROM checksum (xor of the data bytes). */
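/* The XOR below covers bytes 0x00-0x0e of the PROM image; the stored
 * checksum (id_cksum) occupies the following byte, 0x0f, so a healthy
 * PROM XORs to exactly that value.
 */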
static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
{
unsigned char cksum, i, *ptr = (unsigned char *)idprom;
for (i = cksum = 0; i <= 0x0E; i++)
cksum ^= *ptr++;
return cksum;
}
/* Create a local IDPROM copy, verify integrity, and display information. */
void __init idprom_init(void)
{
prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
idprom = &idprom_buffer;
if (idprom->id_format != 0x01)
prom_printf("IDPROM: Warning, unknown format type!\n");
if (idprom->id_cksum != calc_idprom_cksum(idprom))
prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
idprom->id_cksum, calc_idprom_cksum(idprom));
display_system_type(idprom->id_machtype);
printk(KERN_WARNING "Ethernet address: %pM\n", idprom->id_ethaddr);
}
| linux-master | arch/sparc/kernel/idprom.c |
// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/kprobes.c
*
* Copyright (C) 2004 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
/* We do not have hardware single-stepping on sparc64.
* So we implement software single-stepping with breakpoint
* traps. The top-level scheme is similar to that used
* in the x86 kprobes implementation.
*
* In the kprobe->ainsn.insn[] array we store the original
* instruction at index zero and a break instruction at
* index one.
*
* When we hit a kprobe we:
* - Run the pre-handler
* - Remember "regs->tnpc" and interrupt level stored in
* "regs->tstate" so we can restore them later
* - Disable PIL interrupts
* - Set regs->tpc to point to kprobe->ainsn.insn[0]
* - Set regs->tnpc to point to kprobe->ainsn.insn[1]
* - Mark that we are actively in a kprobe
*
* At this point we wait for the second breakpoint at
* kprobe->ainsn.insn[1] to hit. When it does we:
* - Run the post-handler
* - Set regs->tpc to "remembered" regs->tnpc stored above,
* restore the PIL interrupt level in "regs->tstate" as well
* - Make any adjustments necessary to regs->tnpc in order
* to handle relative branches correctly. See below.
* - Mark that we are no longer actively in a kprobe.
*/
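/* Illustrative layout of the slots described above, using the names
 * from this file:
 *
 * p->ainsn.insn[0] = copy of the original instruction at p->addr
 * p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2
 *
 * Executing slot 0 single-steps the displaced instruction; the trap
 * on slot 1 hands control to post_kprobe_handler() for the fixups.
 */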
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long) p->addr & 0x3UL)
return -EILSEQ;
p->ainsn.insn[0] = *p->addr;
flushi(&p->ainsn.insn[0]);
p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
flushi(&p->ainsn.insn[1]);
p->opcode = *p->addr;
return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flushi(p->addr);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flushi(p->addr);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_orig_tnpc = regs->tnpc;
kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
regs->tstate |= TSTATE_PIL;
/* Single-step inline if it is a breakpoint instruction. */
if (p->opcode == BREAKPOINT_INSTRUCTION) {
regs->tpc = (unsigned long) p->addr;
regs->tnpc = kcb->kprobe_orig_tnpc;
} else {
regs->tpc = (unsigned long) &p->ainsn.insn[0];
regs->tnpc = (unsigned long) &p->ainsn.insn[1];
}
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
void *addr = (void *) regs->tpc;
int ret = 0;
struct kprobe_ctlblk *kcb;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS) {
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
kcb->kprobe_orig_tstate_pil);
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* Here we save the original kprobes variables and
* just single-step the instruction of the new probe
* without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
} else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit it; no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
/* Not one of ours: let kernel handle it */
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs)) {
reset_current_kprobe();
preempt_enable_no_resched();
return 1;
}
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
/* If INSN is a relative control transfer instruction,
* return the corrected branch destination value.
*
* regs->tpc and regs->tnpc still hold the values of the
* program counters at the time of trap due to the execution
* of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1]
*
*/
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
struct pt_regs *regs)
{
unsigned long real_pc = (unsigned long) p->addr;
/* Branch not taken, no mods necessary. */
if (regs->tnpc == regs->tpc + 0x4UL)
return real_pc + 0x8UL;
/* The three cases are call, branch w/prediction,
* and traditional branch.
*/
if ((insn & 0xc0000000) == 0x40000000 ||
(insn & 0xc1c00000) == 0x00400000 ||
(insn & 0xc1c00000) == 0x00800000) {
unsigned long ainsn_addr;
ainsn_addr = (unsigned long) &p->ainsn.insn[0];
/* The instruction did all the work for us
* already, just apply the offset to the correct
* instruction location.
*/
return (real_pc + (regs->tnpc - ainsn_addr));
}
/* It is jmpl or some other absolute PC modification instruction,
* leave NPC as-is.
*/
return regs->tnpc;
}
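/* Worked example for the relative-branch case above (illustrative):
 * if the probed instruction was a branch with displacement D, stepping
 * the copy leaves regs->tnpc == ainsn_addr + D, so
 *
 * real_pc + (regs->tnpc - ainsn_addr) == real_pc + D
 *
 * which is exactly where the branch would have gone had it executed
 * in place.
 */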
/* If INSN is an instruction which writes its PC location
* into a destination register, fix that up.
*/
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
unsigned long real_pc)
{
unsigned long *slot = NULL;
/* Simplest case is 'call', which always uses %o7 */
if ((insn & 0xc0000000) == 0x40000000) {
slot = &regs->u_regs[UREG_I7];
}
/* 'jmpl' encodes the register inside of the opcode */
if ((insn & 0xc1f80000) == 0x81c00000) {
unsigned long rd = ((insn >> 25) & 0x1f);
if (rd <= 15) {
slot = &regs->u_regs[rd];
} else {
/* Hard case, it goes onto the stack. */
flushw_all();
rd -= 16;
slot = (unsigned long *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
slot += rd;
}
}
if (slot != NULL)
*slot = real_pc;
}
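/* Register-window note for the jmpl case above: rd 0-15 (%g0-%o7) are
 * held directly in pt_regs->u_regs, while rd 16-31 (%l0-%i7) live in
 * the register window saved on the stack, hence the flushw_all() and
 * the store at FP + STACK_BIAS plus (rd - 16) slots.
 */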
/*
* Called after single-stepping. p->addr is the address of the
* instruction which has been replaced by the breakpoint
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is &p->ainsn.insn[0].
*
* This function prepares to return from the post-single-step
* breakpoint trap.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
u32 insn = p->ainsn.insn[0];
regs->tnpc = relbranch_fixup(insn, p, regs);
/* This assignment must occur after relbranch_fixup() */
regs->tpc = kcb->kprobe_orig_tnpc;
retpc_fixup(regs, insn, (unsigned long) p->addr);
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
kcb->kprobe_orig_tstate_pil);
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
resume_execution(cur, regs, kcb);
/* Restore the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
const struct exception_table_entry *entry;
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe so that tpc points back to the probe address,
* and allow the page fault handler to continue as for
* a normal page fault.
*/
regs->tpc = (unsigned long)cur->addr;
regs->tnpc = kcb->kprobe_orig_tnpc;
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
kcb->kprobe_orig_tstate_pil);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
entry = search_exception_tables(regs->tpc);
if (entry) {
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return 1;
}
/*
* fixup_exception() could not handle it;
* let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
/*
* Wrapper routine for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
if (args->regs && user_mode(args->regs))
return ret;
switch (val) {
case DIE_DEBUG:
if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_DEBUG_2:
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
default:
break;
}
return ret;
}
asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
BUG_ON(trap_level != 0x170 && trap_level != 0x171);
if (user_mode(regs)) {
local_irq_enable();
bad_trap(regs, trap_level);
goto out;
}
/* trap_level == 0x170 --> ta 0x70
* trap_level == 0x171 --> ta 0x71
*/
if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
(trap_level == 0x170) ? "debug" : "debug_2",
regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
bad_trap(regs, trap_level);
out:
exception_exit(prev_state);
}
/* The value stored in the return address register is actually 2
* instructions before where the callee will return to.
* Sequences usually look something like this
*
* call some_function <--- return register points here
* nop <--- call delay slot
* whatever <--- where callee returns to
*
* To keep trampoline_probe_handler logic simpler, we normalize the
* value kept in ri->ret_addr so we don't need to keep adjusting it
* back and forth.
*/
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->u_regs[UREG_RETPC] =
((unsigned long)__kretprobe_trampoline) - 8;
}
/*
* Called when the probe at kretprobe trampoline is hit
*/
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
unsigned long orig_ret_address = 0;
orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
return 1;
}
static void __used kretprobe_trampoline_holder(void)
{
asm volatile(".global __kretprobe_trampoline\n"
"__kretprobe_trampoline:\n"
"\tnop\n"
"\tnop\n");
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
return 1;
return 0;
}
| linux-master | arch/sparc/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0
/* mdesc.c: Sun4V machine description handling.
*
* Copyright (C) 2007, 2008 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/refcount.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/adi.h>
/* Unlike the OBP device tree, the machine description is a full-on
* DAG. An arbitrary number of ARCs are possible from one
* node to other nodes and thus we can't use the OBP device_node
* data structure to represent these nodes inside of the kernel.
*
* Actually, it isn't even a DAG, because there are back pointers
* which create cycles in the graph.
*
* mdesc_hdr and mdesc_elem describe the layout of the data structure
* we get from the Hypervisor.
*/
struct mdesc_hdr {
u32 version; /* Transport version */
u32 node_sz; /* node block size */
u32 name_sz; /* name block size */
u32 data_sz; /* data block size */
char data[];
} __attribute__((aligned(16)));
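/* The three blocks follow the header contiguously, with sizes given by
 * the header fields above; node_block(), name_block() and data_block()
 * later in this file compute the corresponding base pointers:
 *
 * data + 0 node block, node_sz bytes of 16-byte mdesc_elems
 * data + node_sz name block, name_sz bytes of NUL-terminated strings
 * data + node_sz + name_sz data block, data_sz bytes of property values
 */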
struct mdesc_elem {
u8 tag;
#define MD_LIST_END 0x00
#define MD_NODE 0x4e
#define MD_NODE_END 0x45
#define MD_NOOP 0x20
#define MD_PROP_ARC 0x61
#define MD_PROP_VAL 0x76
#define MD_PROP_STR 0x73
#define MD_PROP_DATA 0x64
u8 name_len;
u16 resv;
u32 name_offset;
union {
struct {
u32 data_len;
u32 data_offset;
} data;
u64 val;
} d;
};
struct mdesc_mem_ops {
struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
void (*free)(struct mdesc_handle *handle);
};
struct mdesc_handle {
struct list_head list;
struct mdesc_mem_ops *mops;
void *self_base;
refcount_t refcnt;
unsigned int handle_size;
struct mdesc_hdr mdesc;
};
typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64,
union md_node_info *);
typedef void (*mdesc_node_info_rel_f)(union md_node_info *);
typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *);
struct md_node_ops {
char *name;
mdesc_node_info_get_f get_info;
mdesc_node_info_rel_f rel_info;
mdesc_node_match_f node_match;
};
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
union md_node_info *node_info);
static void rel_vdev_port_node_info(union md_node_info *node_info);
static bool vdev_port_node_match(union md_node_info *a_node_info,
union md_node_info *b_node_info);
static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
union md_node_info *node_info);
static void rel_ds_port_node_info(union md_node_info *node_info);
static bool ds_port_node_match(union md_node_info *a_node_info,
union md_node_info *b_node_info);
/* supported node types which can be registered */
static struct md_node_ops md_node_ops_table[] = {
{"virtual-device-port", get_vdev_port_node_info,
rel_vdev_port_node_info, vdev_port_node_match},
{"domain-services-port", get_ds_port_node_info,
rel_ds_port_node_info, ds_port_node_match},
{NULL, NULL, NULL, NULL}
};
static void mdesc_get_node_ops(const char *node_name,
mdesc_node_info_get_f *get_info_f,
mdesc_node_info_rel_f *rel_info_f,
mdesc_node_match_f *match_f)
{
int i;
if (get_info_f)
*get_info_f = NULL;
if (rel_info_f)
*rel_info_f = NULL;
if (match_f)
*match_f = NULL;
if (!node_name)
return;
for (i = 0; md_node_ops_table[i].name != NULL; i++) {
if (strcmp(md_node_ops_table[i].name, node_name) == 0) {
if (get_info_f)
*get_info_f = md_node_ops_table[i].get_info;
if (rel_info_f)
*rel_info_f = md_node_ops_table[i].rel_info;
if (match_f)
*match_f = md_node_ops_table[i].node_match;
break;
}
}
}
static void mdesc_handle_init(struct mdesc_handle *hp,
unsigned int handle_size,
void *base)
{
BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));
memset(hp, 0, handle_size);
INIT_LIST_HEAD(&hp->list);
hp->self_base = base;
refcount_set(&hp->refcnt, 1);
hp->handle_size = handle_size;
}
static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
{
unsigned int handle_size, alloc_size;
struct mdesc_handle *hp;
unsigned long paddr;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
alloc_size = PAGE_ALIGN(handle_size);
paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);
hp = NULL;
if (paddr) {
hp = __va(paddr);
mdesc_handle_init(hp, handle_size, hp);
}
return hp;
}
static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
unsigned int alloc_size;
unsigned long start;
BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(hp->handle_size);
start = __pa(hp);
memblock_free_late(start, alloc_size);
}
static struct mdesc_mem_ops memblock_mdesc_ops = {
.alloc = mdesc_memblock_alloc,
.free = mdesc_memblock_free,
};
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
unsigned int handle_size;
struct mdesc_handle *hp;
unsigned long addr;
void *base;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!base)
return NULL;
addr = (unsigned long)base;
addr = (addr + 15UL) & ~15UL;
hp = (struct mdesc_handle *) addr;
mdesc_handle_init(hp, handle_size, base);
return hp;
}
static void mdesc_kfree(struct mdesc_handle *hp)
{
BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
kfree(hp->self_base);
}
static struct mdesc_mem_ops kmalloc_mdesc_memops = {
.alloc = mdesc_kmalloc,
.free = mdesc_kfree,
};
static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
struct mdesc_mem_ops *mops)
{
struct mdesc_handle *hp = mops->alloc(mdesc_size);
if (hp)
hp->mops = mops;
return hp;
}
static void mdesc_free(struct mdesc_handle *hp)
{
hp->mops->free(hp);
}
static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);
struct mdesc_handle *mdesc_grab(void)
{
struct mdesc_handle *hp;
unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags);
hp = cur_mdesc;
if (hp)
refcount_inc(&hp->refcnt);
spin_unlock_irqrestore(&mdesc_lock, flags);
return hp;
}
EXPORT_SYMBOL(mdesc_grab);
void mdesc_release(struct mdesc_handle *hp)
{
unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags);
if (refcount_dec_and_test(&hp->refcnt)) {
list_del_init(&hp->list);
hp->mops->free(hp);
}
spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);
static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
bool supported = false;
u64 node;
int i;
mutex_lock(&mdesc_mutex);
/* check to see if the node is supported for registration */
for (i = 0; md_node_ops_table[i].name != NULL; i++) {
if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) {
supported = true;
break;
}
}
if (!supported) {
pr_err("MD: %s node not supported\n", client->node_name);
mutex_unlock(&mdesc_mutex);
return;
}
client->next = client_list;
client_list = client;
mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
client->add(cur_mdesc, node, client->node_name);
mutex_unlock(&mdesc_mutex);
}
static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
const u64 *id;
u64 a;
id = NULL;
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
u64 target;
target = mdesc_arc_target(hp, a);
id = mdesc_get_property(hp, target,
"cfg-handle", NULL);
if (id)
break;
}
return id;
}
static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
union md_node_info *node_info)
{
const u64 *parent_cfg_hdlp;
const char *name;
const u64 *idp;
/*
* Virtual device nodes are distinguished by:
* 1. "id" property
* 2. "name" property
* 3. parent node "cfg-handle" property
*/
idp = mdesc_get_property(md, node, "id", NULL);
name = mdesc_get_property(md, node, "name", NULL);
parent_cfg_hdlp = parent_cfg_handle(md, node);
if (!idp || !name || !parent_cfg_hdlp)
return -1;
node_info->vdev_port.id = *idp;
node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
if (!node_info->vdev_port.name)
return -1;
node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
return 0;
}
static void rel_vdev_port_node_info(union md_node_info *node_info)
{
if (node_info && node_info->vdev_port.name) {
kfree_const(node_info->vdev_port.name);
node_info->vdev_port.name = NULL;
}
}
static bool vdev_port_node_match(union md_node_info *a_node_info,
union md_node_info *b_node_info)
{
if (a_node_info->vdev_port.id != b_node_info->vdev_port.id)
return false;
if (a_node_info->vdev_port.parent_cfg_hdl !=
b_node_info->vdev_port.parent_cfg_hdl)
return false;
if (strncmp(a_node_info->vdev_port.name,
b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0)
return false;
return true;
}
static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
union md_node_info *node_info)
{
const u64 *idp;
/* DS port nodes use the "id" property to distinguish them */
idp = mdesc_get_property(md, node, "id", NULL);
if (!idp)
return -1;
node_info->ds_port.id = *idp;
return 0;
}
static void rel_ds_port_node_info(union md_node_info *node_info)
{
}
static bool ds_port_node_match(union md_node_info *a_node_info,
union md_node_info *b_node_info)
{
if (a_node_info->ds_port.id != b_node_info->ds_port.id)
return false;
return true;
}
/* Run 'func' on nodes which are in A but not in B. */
static void invoke_on_missing(const char *name,
struct mdesc_handle *a,
struct mdesc_handle *b,
void (*func)(struct mdesc_handle *, u64,
const char *node_name))
{
mdesc_node_info_get_f get_info_func;
mdesc_node_info_rel_f rel_info_func;
mdesc_node_match_f node_match_func;
union md_node_info a_node_info;
union md_node_info b_node_info;
bool found;
u64 a_node;
u64 b_node;
int rv;
/*
* Find the get_info, rel_info and node_match ops for the given
* node name
*/
mdesc_get_node_ops(name, &get_info_func, &rel_info_func,
&node_match_func);
/* If we didn't find a match, the node type is not supported */
if (!get_info_func || !rel_info_func || !node_match_func) {
pr_err("MD: %s node type is not supported\n", name);
return;
}
mdesc_for_each_node_by_name(a, a_node, name) {
found = false;
rv = get_info_func(a, a_node, &a_node_info);
if (rv != 0) {
pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
name);
continue;
}
/* Check each node in B for node matching a_node */
mdesc_for_each_node_by_name(b, b_node, name) {
rv = get_info_func(b, b_node, &b_node_info);
if (rv != 0)
continue;
if (node_match_func(&a_node_info, &b_node_info)) {
found = true;
rel_info_func(&b_node_info);
break;
}
rel_info_func(&b_node_info);
}
rel_info_func(&a_node_info);
if (!found)
func(a, a_node, name);
}
}
static void notify_one(struct mdesc_notifier_client *p,
struct mdesc_handle *old_hp,
struct mdesc_handle *new_hp)
{
invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}
static void mdesc_notify_clients(struct mdesc_handle *old_hp,
struct mdesc_handle *new_hp)
{
struct mdesc_notifier_client *p = client_list;
while (p) {
notify_one(p, old_hp, new_hp);
p = p->next;
}
}
void mdesc_update(void)
{
unsigned long len, real_len, status;
struct mdesc_handle *hp, *orig_hp;
unsigned long flags;
mutex_lock(&mdesc_mutex);
(void) sun4v_mach_desc(0UL, 0UL, &len);
hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
if (!hp) {
printk(KERN_ERR "MD: mdesc alloc fails\n");
goto out;
}
status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
if (status != HV_EOK || real_len > len) {
printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
status);
refcount_dec(&hp->refcnt);
mdesc_free(hp);
goto out;
}
spin_lock_irqsave(&mdesc_lock, flags);
orig_hp = cur_mdesc;
cur_mdesc = hp;
spin_unlock_irqrestore(&mdesc_lock, flags);
mdesc_notify_clients(orig_hp, hp);
spin_lock_irqsave(&mdesc_lock, flags);
if (refcount_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp);
else
list_add(&orig_hp->list, &mdesc_zombie_list);
spin_unlock_irqrestore(&mdesc_lock, flags);
out:
mutex_unlock(&mdesc_mutex);
}
u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
union md_node_info *node_info)
{
mdesc_node_info_get_f get_info_func;
mdesc_node_info_rel_f rel_info_func;
mdesc_node_match_f node_match_func;
union md_node_info hp_node_info;
u64 hp_node;
int rv;
if (hp == NULL || node_name == NULL || node_info == NULL)
return MDESC_NODE_NULL;
/* Find the ops for the given node name */
mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func,
&node_match_func);
/* If we didn't find ops for the given node name, it is not supported */
if (!get_info_func || !rel_info_func || !node_match_func) {
pr_err("MD: %s node is not supported\n", node_name);
return -EINVAL;
}
mdesc_for_each_node_by_name(hp, hp_node, node_name) {
rv = get_info_func(hp, hp_node, &hp_node_info);
if (rv != 0)
continue;
if (node_match_func(node_info, &hp_node_info))
break;
rel_info_func(&hp_node_info);
}
rel_info_func(&hp_node_info);
return hp_node;
}
EXPORT_SYMBOL(mdesc_get_node);
int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
const char *node_name, union md_node_info *node_info)
{
mdesc_node_info_get_f get_info_func;
int rv;
if (hp == NULL || node == MDESC_NODE_NULL ||
node_name == NULL || node_info == NULL)
return -EINVAL;
/* Find the get_info op for the given node name */
mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL);
/* If we didn't find a get_info_func, the node name is not supported */
if (get_info_func == NULL) {
pr_err("MD: %s node is not supported\n", node_name);
return -EINVAL;
}
rv = get_info_func(hp, node, node_info);
if (rv != 0) {
pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
node_name);
return -1;
}
return 0;
}
EXPORT_SYMBOL(mdesc_get_node_info);
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
return (struct mdesc_elem *) mdesc->data;
}
static void *name_block(struct mdesc_hdr *mdesc)
{
return ((void *) node_block(mdesc)) + mdesc->node_sz;
}
static void *data_block(struct mdesc_hdr *mdesc)
{
return ((void *) name_block(mdesc)) + mdesc->name_sz;
}
u64 mdesc_node_by_name(struct mdesc_handle *hp,
u64 from_node, const char *name)
{
struct mdesc_elem *ep = node_block(&hp->mdesc);
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
u64 ret;
if (from_node == MDESC_NODE_NULL) {
ret = from_node = 0;
} else if (from_node >= last_node) {
return MDESC_NODE_NULL;
} else {
ret = ep[from_node].d.val;
}
while (ret < last_node) {
if (ep[ret].tag != MD_NODE)
return MDESC_NODE_NULL;
if (!strcmp(names + ep[ret].name_offset, name))
break;
ret = ep[ret].d.val;
}
if (ret >= last_node)
ret = MDESC_NODE_NULL;
return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
const char *name, int *lenp)
{
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
void *data = data_block(&hp->mdesc);
struct mdesc_elem *ep;
if (node == MDESC_NODE_NULL || node >= last_node)
return NULL;
ep = node_block(&hp->mdesc) + node;
ep++;
for (; ep->tag != MD_NODE_END; ep++) {
void *val = NULL;
int len = 0;
switch (ep->tag) {
case MD_PROP_VAL:
val = &ep->d.val;
len = 8;
break;
case MD_PROP_STR:
case MD_PROP_DATA:
val = data + ep->d.data.data_offset;
len = ep->d.data.data_len;
break;
default:
break;
}
if (!val)
continue;
if (!strcmp(names + ep->name_offset, name)) {
if (lenp)
*lenp = len;
return val;
}
}
return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);
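/* Illustrative sketch (kept out of the build): how a consumer might
 * combine mdesc_node_by_name() and mdesc_get_property() to read a
 * string property. The function name and the choice of properties are
 * assumptions for the example; see report_platform_properties() below
 * for the in-tree equivalent.
 */
#if 0
static void example_print_banner(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	const char *banner;
	u64 pn;

	if (!hp)
		return;
	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	if (pn != MDESC_NODE_NULL) {
		banner = mdesc_get_property(hp, pn, "banner-name", NULL);
		if (banner)
			printk("banner-name [%s]\n", banner);
	}
	mdesc_release(hp);
}
#endif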
u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
if (from == MDESC_NODE_NULL || from >= last_node)
return MDESC_NODE_NULL;
ep = base + from;
ep++;
for (; ep->tag != MD_NODE_END; ep++) {
if (ep->tag != MD_PROP_ARC)
continue;
if (strcmp(names + ep->name_offset, arc_type))
continue;
return ep - base;
}
return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);
u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
ep = base + arc;
return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);
const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
if (node == MDESC_NODE_NULL || node >= last_node)
return NULL;
ep = base + node;
if (ep->tag != MD_NODE)
return NULL;
return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);
static u64 max_cpus = 64;
static void __init report_platform_properties(void)
{
struct mdesc_handle *hp = mdesc_grab();
u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
const char *s;
const u64 *v;
if (pn == MDESC_NODE_NULL) {
prom_printf("No platform node in machine-description.\n");
prom_halt();
}
s = mdesc_get_property(hp, pn, "banner-name", NULL);
printk("PLATFORM: banner-name [%s]\n", s);
s = mdesc_get_property(hp, pn, "name", NULL);
printk("PLATFORM: name [%s]\n", s);
v = mdesc_get_property(hp, pn, "hostid", NULL);
if (v)
printk("PLATFORM: hostid [%08llx]\n", *v);
v = mdesc_get_property(hp, pn, "serial#", NULL);
if (v)
printk("PLATFORM: serial# [%08llx]\n", *v);
v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
printk("PLATFORM: stick-frequency [%08llx]\n", *v);
v = mdesc_get_property(hp, pn, "mac-address", NULL);
if (v)
printk("PLATFORM: mac-address [%llx]\n", *v);
v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
if (v)
printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
if (v)
printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
v = mdesc_get_property(hp, pn, "max-cpus", NULL);
if (v) {
max_cpus = *v;
printk("PLATFORM: max-cpus [%llu]\n", max_cpus);
}
#ifdef CONFIG_SMP
{
int max_cpu, i;
if (v) {
max_cpu = *v;
if (max_cpu > NR_CPUS)
max_cpu = NR_CPUS;
} else {
max_cpu = NR_CPUS;
}
for (i = 0; i < max_cpu; i++)
set_cpu_possible(i, true);
}
#endif
mdesc_release(hp);
}
static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
{
const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
const char *type;
int type_len;
type = mdesc_get_property(hp, mp, "type", &type_len);
switch (*level) {
case 1:
if (of_find_in_proplist(type, "instn", type_len)) {
c->icache_size = *size;
c->icache_line_size = *line_size;
} else if (of_find_in_proplist(type, "data", type_len)) {
c->dcache_size = *size;
c->dcache_line_size = *line_size;
}
break;
case 2:
c->ecache_size = *size;
c->ecache_line_size = *line_size;
break;
default:
break;
}
if (*level == 1) {
u64 a;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
u64 target = mdesc_arc_target(hp, a);
const char *name = mdesc_node_name(hp, target);
if (!strcmp(name, "cache"))
fill_in_one_cache(c, hp, target);
}
}
}
static void find_back_node_value(struct mdesc_handle *hp, u64 node,
char *srch_val,
void (*func)(struct mdesc_handle *, u64, int),
u64 val, int depth)
{
u64 arc;
/* Since we have an estimate of recursion depth, do a sanity check. */
if (depth == 0)
return;
mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
u64 n = mdesc_arc_target(hp, arc);
const char *name = mdesc_node_name(hp, n);
if (!strcmp(srch_val, name))
(*func)(hp, n, val);
find_back_node_value(hp, n, srch_val, func, val, depth-1);
}
}
static void __mark_core_id(struct mdesc_handle *hp, u64 node,
int core_id)
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
if (*id < num_possible_cpus())
cpu_data(*id).core_id = core_id;
}
static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
int max_cache_id)
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
if (*id < num_possible_cpus()) {
cpu_data(*id).max_cache_id = max_cache_id;
/* On systems without explicit socket descriptions, the socket
 * id is the max_cache_id.
 */
cpu_data(*id).sock_id = max_cache_id;
}
}
static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
int core_id)
{
find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}
static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
int max_cache_id)
{
find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
max_cache_id, 10);
}
static void set_core_ids(struct mdesc_handle *hp)
{
int idx;
u64 mp;
idx = 1;
/* Identify unique cores by looking for cpus backpointed to by
* level 1 instruction caches.
*/
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *level;
const char *type;
int len;
level = mdesc_get_property(hp, mp, "level", NULL);
if (*level != 1)
continue;
type = mdesc_get_property(hp, mp, "type", &len);
if (!of_find_in_proplist(type, "instn", len))
continue;
mark_core_ids(hp, mp, idx);
idx++;
}
}
static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
u64 mp;
int idx = 1;
int fnd = 0;
/*
 * Identify unique highest level of shared cache by looking for cpus
 * backpointed to by shared level N caches.
 */
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *cur_lvl;
cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
if (*cur_lvl != level)
continue;
mark_max_cache_ids(hp, mp, idx);
idx++;
fnd = 1;
}
return fnd;
}
static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
{
int idx = 1;
mdesc_for_each_node_by_name(hp, mp, "socket") {
u64 a;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
u64 t = mdesc_arc_target(hp, a);
const char *name;
const u64 *id;
name = mdesc_node_name(hp, t);
if (strcmp(name, "cpu"))
continue;
id = mdesc_get_property(hp, t, "id", NULL);
if (*id < num_possible_cpus())
cpu_data(*id).sock_id = idx;
}
idx++;
}
}
static void set_sock_ids(struct mdesc_handle *hp)
{
u64 mp;
/*
 * Find the highest level of shared cache, which pre-T7 is also
 * the socket.
 */
if (!set_max_cache_ids_by_cache(hp, 3))
set_max_cache_ids_by_cache(hp, 2);
/* If machine description exposes sockets data use it.*/
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
if (mp != MDESC_NODE_NULL)
set_sock_ids_by_socket(hp, mp);
}
static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
u64 a;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
u64 t = mdesc_arc_target(hp, a);
const char *name;
const u64 *id;
name = mdesc_node_name(hp, t);
if (strcmp(name, "cpu"))
continue;
id = mdesc_get_property(hp, t, "id", NULL);
if (*id < NR_CPUS)
cpu_data(*id).proc_id = proc_id;
}
}
static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
int idx;
u64 mp;
idx = 0;
mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
const char *type;
int len;
type = mdesc_get_property(hp, mp, "type", &len);
if (!of_find_in_proplist(type, "int", len) &&
!of_find_in_proplist(type, "integer", len))
continue;
mark_proc_ids(hp, mp, idx);
idx++;
}
}
static void set_proc_ids(struct mdesc_handle *hp)
{
__set_proc_ids(hp, "exec_unit");
__set_proc_ids(hp, "exec-unit");
}
static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
unsigned long def, unsigned long max)
{
u64 val;
if (!p)
goto use_default;
val = *p;
if (!val || val >= 64)
goto use_default;
if (val > max)
val = max;
*mask = ((1U << val) * 64U) - 1U;
return;
use_default:
*mask = ((1U << def) * 64U) - 1U;
}
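/* Worked example (illustrative): a "q-cpu-mondo-#bits" property of 7,
 * with max_cpus = 64 (so max = ilog2(128) = 7), yields
 * *mask = (1 << 7) * 64 - 1 = 0x1fff, i.e. an 8192-entry queue.
 * A missing or out-of-range property falls back to the same formula
 * using 'def'.
 */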
static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
struct trap_per_cpu *tb)
{
static int printed;
const u64 *val;
val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));
val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);
val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);
val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);
if (!printed++) {
pr_info("SUN4V: Mondo queue sizes "
"[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
tb->cpu_mondo_qmask + 1,
tb->dev_mondo_qmask + 1,
tb->resum_qmask + 1,
tb->nonresum_qmask + 1);
}
}
static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
struct mdesc_handle *hp = mdesc_grab();
void *ret = NULL;
u64 mp;
mdesc_for_each_node_by_name(hp, mp, "cpu") {
const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
int cpuid = *id;
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
printk(KERN_WARNING "Ignoring CPU %d which is "
">= NR_CPUS (%d)\n",
cpuid, NR_CPUS);
continue;
}
if (!cpumask_test_cpu(cpuid, mask))
continue;
#endif
ret = func(hp, mp, cpuid, arg);
if (ret)
goto out;
}
out:
mdesc_release(hp);
return ret;
}
static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
void *arg)
{
ncpus_probed++;
#ifdef CONFIG_SMP
set_cpu_present(cpuid, true);
#endif
return NULL;
}
void mdesc_populate_present_mask(cpumask_t *mask)
{
if (tlb_type != hypervisor)
return;
ncpus_probed = 0;
mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}
static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL);
unsigned long *pgsz_mask = arg;
u64 val;
val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
if (pgsz_prop)
val = *pgsz_prop;
if (!*pgsz_mask)
*pgsz_mask = val;
else
*pgsz_mask &= val;
return NULL;
}
void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
{
*pgsz_mask = 0;
mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
}
static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
void *arg)
{
const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
struct trap_per_cpu *tb;
cpuinfo_sparc *c;
u64 a;
#ifndef CONFIG_SMP
/* On uniprocessor we only want the values for the
* real physical cpu the kernel booted onto; however,
* cpu_data() only has one entry, at index 0.
*/
if (cpuid != real_hard_smp_processor_id())
return NULL;
cpuid = 0;
#endif
c = &cpu_data(cpuid);
c->clock_tick = *cfreq;
tb = &trap_block[cpuid];
get_mondo_data(hp, mp, tb);
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
u64 j, t = mdesc_arc_target(hp, a);
const char *t_name;
t_name = mdesc_node_name(hp, t);
if (!strcmp(t_name, "cache")) {
fill_in_one_cache(c, hp, t);
continue;
}
mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
u64 n = mdesc_arc_target(hp, j);
const char *n_name;
n_name = mdesc_node_name(hp, n);
if (!strcmp(n_name, "cache"))
fill_in_one_cache(c, hp, n);
}
}
c->core_id = 0;
c->proc_id = -1;
return NULL;
}
void mdesc_fill_in_cpu_data(cpumask_t *mask)
{
struct mdesc_handle *hp;
mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
hp = mdesc_grab();
set_core_ids(hp);
set_proc_ids(hp);
set_sock_ids(hp);
mdesc_release(hp);
smp_fill_in_sib_core_maps();
}
/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
* opened. Hold this reference until /dev/mdesc is closed to ensure
* mdesc data structure is not released underneath us. Store the
* pointer to mdesc structure in private_data for read and seek to use
*/
static int mdesc_open(struct inode *inode, struct file *file)
{
struct mdesc_handle *hp = mdesc_grab();
if (!hp)
return -ENODEV;
file->private_data = hp;
return 0;
}
static ssize_t mdesc_read(struct file *file, char __user *buf,
size_t len, loff_t *offp)
{
struct mdesc_handle *hp = file->private_data;
unsigned char *mdesc;
int bytes_left, count = len;
if (*offp >= hp->handle_size)
return 0;
bytes_left = hp->handle_size - *offp;
if (count > bytes_left)
count = bytes_left;
mdesc = (unsigned char *)&hp->mdesc;
mdesc += *offp;
if (!copy_to_user(buf, mdesc, count)) {
*offp += count;
return count;
} else {
return -EFAULT;
}
}
static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence)
{
struct mdesc_handle *hp = file->private_data;
return no_seek_end_llseek_size(file, offset, whence, hp->handle_size);
}
/* mdesc_close() - /dev/mdesc is being closed, release the reference to
* mdesc structure.
*/
static int mdesc_close(struct inode *inode, struct file *file)
{
mdesc_release(file->private_data);
return 0;
}
static const struct file_operations mdesc_fops = {
.open = mdesc_open,
.read = mdesc_read,
.llseek = mdesc_llseek,
.release = mdesc_close,
.owner = THIS_MODULE,
};
static struct miscdevice mdesc_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "mdesc",
.fops = &mdesc_fops,
};
static int __init mdesc_misc_init(void)
{
return misc_register(&mdesc_misc);
}
__initcall(mdesc_misc_init);
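/* Illustrative userspace consumer (not kernel code, so kept out of the
 * build): dumping the raw machine description through the misc device
 * registered above. The buffer size and error handling are assumptions.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int dump_mdesc(int out_fd)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/mdesc", O_RDONLY);

	if (fd < 0)
		return -1;
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		if (write(out_fd, buf, n) != n)
			break;
	}
	close(fd);
	return 0;
}
#endif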
void __init sun4v_mdesc_init(void)
{
struct mdesc_handle *hp;
unsigned long len, real_len, status;
(void) sun4v_mach_desc(0UL, 0UL, &len);
printk("MDESC: Size is %lu bytes.\n", len);
hp = mdesc_alloc(len, &memblock_mdesc_ops);
if (hp == NULL) {
prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
prom_halt();
}
status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
if (status != HV_EOK || real_len > len) {
prom_printf("sun4v_mach_desc fails, err(%lu), "
"len(%lu), real_len(%lu)\n",
status, len, real_len);
mdesc_free(hp);
prom_halt();
}
cur_mdesc = hp;
mdesc_adi_init();
report_platform_properties();
}
| linux-master | arch/sparc/kernel/mdesc.c |
// SPDX-License-Identifier: GPL-2.0
/* sstate.c: System soft state support.
*
* Copyright (C) 2007, 2008 David S. Miller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
#include <asm/head.h>
#include <asm/io.h>
#include "kernel.h"
static int hv_supports_soft_state;
static void do_set_sstate(unsigned long state, const char *msg)
{
unsigned long err;
if (!hv_supports_soft_state)
return;
err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg));
if (err) {
printk(KERN_WARNING "SSTATE: Failed to set soft-state to "
"state[%lx] msg[%s], err=%lu\n",
state, msg, err);
}
}
static const char booting_msg[32] __attribute__((aligned(32))) =
"Linux booting";
static const char running_msg[32] __attribute__((aligned(32))) =
"Linux running";
static const char halting_msg[32] __attribute__((aligned(32))) =
"Linux halting";
static const char poweroff_msg[32] __attribute__((aligned(32))) =
"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
"Linux rebooting";
static const char panicking_msg[32] __attribute__((aligned(32))) =
"Linux panicking";
static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
{
const char *msg;
switch (type) {
case SYS_DOWN:
default:
msg = rebooting_msg;
break;
case SYS_HALT:
msg = halting_msg;
break;
case SYS_POWER_OFF:
msg = poweroff_msg;
break;
}
do_set_sstate(HV_SOFT_STATE_TRANSITION, msg);
return NOTIFY_OK;
}
static struct notifier_block sstate_reboot_notifier = {
.notifier_call = sstate_reboot_call,
};
static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
return NOTIFY_DONE;
}
static struct notifier_block sstate_panic_block = {
.notifier_call = sstate_panic_event,
.priority = INT_MAX,
};
static int __init sstate_init(void)
{
unsigned long major, minor;
if (tlb_type != hypervisor)
return 0;
major = 1;
minor = 0;
if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor))
return 0;
hv_supports_soft_state = 1;
prom_sun4v_guest_soft_state();
do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg);
atomic_notifier_chain_register(&panic_notifier_list,
&sstate_panic_block);
register_reboot_notifier(&sstate_reboot_notifier);
return 0;
}
core_initcall(sstate_init);
static int __init sstate_running(void)
{
do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg);
return 0;
}
late_initcall(sstate_running);
| linux-master | arch/sparc/kernel/sstate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IOMMU mmap management and range allocation functions.
* Based almost entirely upon the powerpc iommu allocator.
*/
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>
#include <asm/iommu-common.h>
static unsigned long iommu_large_alloc = 15;
static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
static inline bool need_flush(struct iommu_map_table *iommu)
{
return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}
static inline void set_flush(struct iommu_map_table *iommu)
{
iommu->flags |= IOMMU_NEED_FLUSH;
}
static inline void clear_flush(struct iommu_map_table *iommu)
{
iommu->flags &= ~IOMMU_NEED_FLUSH;
}
static void setup_iommu_pool_hash(void)
{
unsigned int i;
static bool do_once;
if (do_once)
return;
do_once = true;
for_each_possible_cpu(i)
per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}
/*
* Initialize iommu_pool entries for the iommu_map_table. `num_entries'
* is the number of table entries. If `large_pool' is set to true,
* the top 1/4 of the table will be set aside for pool allocations
* of more than iommu_large_alloc pages.
*/
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
unsigned long num_entries,
u32 table_shift,
void (*lazy_flush)(struct iommu_map_table *),
bool large_pool, u32 npools,
bool skip_span_boundary_check)
{
unsigned int start, i;
struct iommu_pool *p = &(iommu->large_pool);
setup_iommu_pool_hash();
if (npools == 0)
iommu->nr_pools = IOMMU_NR_POOLS;
else
iommu->nr_pools = npools;
BUG_ON(npools > IOMMU_NR_POOLS);
iommu->table_shift = table_shift;
iommu->lazy_flush = lazy_flush;
start = 0;
if (skip_span_boundary_check)
iommu->flags |= IOMMU_NO_SPAN_BOUND;
if (large_pool)
iommu->flags |= IOMMU_HAS_LARGE_POOL;
if (!large_pool)
iommu->poolsize = num_entries/iommu->nr_pools;
else
iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
for (i = 0; i < iommu->nr_pools; i++) {
spin_lock_init(&(iommu->pools[i].lock));
iommu->pools[i].start = start;
iommu->pools[i].hint = start;
start += iommu->poolsize; /* start for next pool */
iommu->pools[i].end = start - 1;
}
if (!large_pool)
return;
/* initialize large_pool */
spin_lock_init(&(p->lock));
p->start = start;
p->hint = p->start;
p->end = num_entries;
}
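/* Worked example (illustrative): num_entries = 1024, npools = 4 and
 * large_pool = true gives poolsize = (1024 * 3 / 4) / 4 = 192, so the
 * small pools cover [0,191], [192,383], [384,575] and [576,767], while
 * the large pool spans [768, 1024).
 */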
unsigned long iommu_tbl_range_alloc(struct device *dev,
struct iommu_map_table *iommu,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order)
{
unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
unsigned long n, end, start, limit, boundary_size;
struct iommu_pool *pool;
int pass = 0;
unsigned int pool_nr;
unsigned int npools = iommu->nr_pools;
unsigned long flags;
bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
bool largealloc = (large_pool && npages > iommu_large_alloc);
unsigned long shift;
unsigned long align_mask = 0;
if (align_order > 0)
align_mask = ~0ul >> (BITS_PER_LONG - align_order);
/* Sanity check */
if (unlikely(npages == 0)) {
WARN_ON_ONCE(1);
return IOMMU_ERROR_CODE;
}
if (largealloc) {
pool = &(iommu->large_pool);
pool_nr = 0; /* to keep compiler happy */
} else {
/* pick out pool_nr */
pool_nr = pool_hash & (npools - 1);
pool = &(iommu->pools[pool_nr]);
}
spin_lock_irqsave(&pool->lock, flags);
again:
if (pass == 0 && handle && *handle &&
(*handle >= pool->start) && (*handle < pool->end))
start = *handle;
else
start = pool->hint;
limit = pool->end;
/* The case below can happen if we have a small segment appended
* to a large one, or when the previous alloc was at the very end of
* the available space. If so, go back to the beginning. If a
* flush is needed, it will get done based on the return value
* from iommu_area_alloc() below.
*/
if (start >= limit)
start = pool->start;
shift = iommu->table_map_base >> iommu->table_shift;
if (limit + shift > mask) {
limit = mask - shift + 1;
/* If we're constrained on address range, first try
* at the masked hint to avoid O(n) search complexity,
* but on second pass, start at 0 in pool 0.
*/
if ((start & mask) >= limit || pass > 0) {
spin_unlock(&(pool->lock));
pool = &(iommu->pools[0]);
spin_lock(&(pool->lock));
start = pool->start;
} else {
start &= mask;
}
}
/*
* If skip_span_boundary_check was set during init, we set
* things up so that iommu_is_span_boundary() merely checks whether
* (index + npages) < num_tsb_entries.
*/
if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
shift = 0;
boundary_size = iommu->poolsize * iommu->nr_pools;
} else {
boundary_size = dma_get_seg_boundary_nr_pages(dev,
iommu->table_shift);
}
n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
boundary_size, align_mask);
if (n == -1) {
if (likely(pass == 0)) {
/* First failure, rescan from the beginning. */
pool->hint = pool->start;
set_flush(iommu);
pass++;
goto again;
} else if (!largealloc && pass <= iommu->nr_pools) {
spin_unlock(&(pool->lock));
pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
pool = &(iommu->pools[pool_nr]);
spin_lock(&(pool->lock));
pool->hint = pool->start;
set_flush(iommu);
pass++;
goto again;
} else {
/* give up */
n = IOMMU_ERROR_CODE;
goto bail;
}
}
if (iommu->lazy_flush &&
(n < pool->hint || need_flush(iommu))) {
clear_flush(iommu);
iommu->lazy_flush(iommu);
}
end = n + npages;
pool->hint = end;
/* Update handle for SG allocations */
if (handle)
*handle = end;
bail:
spin_unlock_irqrestore(&(pool->lock), flags);
return n;
}
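/* Illustrative pairing of the allocator with iommu_tbl_range_free()
 * below (kept out of the build). "tbl", "dev" and "npages" are
 * assumptions, as is the TTE-programming step a real driver would do;
 * this is a sketch, not a real mapping path.
 */
#if 0
static dma_addr_t example_map(struct device *dev,
			      struct iommu_map_table *tbl,
			      unsigned long npages)
{
	unsigned long entry = iommu_tbl_range_alloc(dev, tbl, npages,
						    NULL, ~0UL, 0);

	if (entry == IOMMU_ERROR_CODE)
		return 0;
	/* ... program npages translation entries at 'entry' here ... */
	return tbl->table_map_base + (entry << tbl->table_shift);
}

static void example_unmap(struct iommu_map_table *tbl, dma_addr_t dma_addr,
			  unsigned long npages)
{
	iommu_tbl_range_free(tbl, dma_addr, npages, IOMMU_ERROR_CODE);
}
#endif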
static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
unsigned long entry)
{
struct iommu_pool *p;
unsigned long largepool_start = tbl->large_pool.start;
bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
/* The large pool is the last pool at the top of the table */
if (large_pool && entry >= largepool_start) {
p = &tbl->large_pool;
} else {
unsigned int pool_nr = entry / tbl->poolsize;
BUG_ON(pool_nr >= tbl->nr_pools);
p = &tbl->pools[pool_nr];
}
return p;
}
/* Caller supplies the index of the entry into the iommu map table
* itself when the mapping from dma_addr to the entry is not the
* default addr->entry mapping below.
*/
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
unsigned long npages, unsigned long entry)
{
struct iommu_pool *pool;
unsigned long flags;
unsigned long shift = iommu->table_shift;
if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
entry = (dma_addr - iommu->table_map_base) >> shift;
pool = get_pool(iommu, entry);
spin_lock_irqsave(&(pool->lock), flags);
bitmap_clear(iommu->map, entry, npages);
spin_unlock_irqrestore(&(pool->lock), flags);
}
| linux-master | arch/sparc/kernel/iommu-common.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include "prom.h"
#ifdef CONFIG_PCI
/* PSYCHO interrupt mapping support. */
#define PSYCHO_IMAP_A_SLOT0 0x0c00UL
#define PSYCHO_IMAP_B_SLOT0 0x0c20UL
static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
{
unsigned int bus = (ino & 0x10) >> 4;
unsigned int slot = (ino & 0x0c) >> 2;
if (bus == 0)
return PSYCHO_IMAP_A_SLOT0 + (slot * 8);
else
return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
}
#define PSYCHO_OBIO_IMAP_BASE 0x1000UL
#define PSYCHO_ONBOARD_IRQ_BASE 0x20
#define psycho_onboard_imap_offset(__ino) \
(PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
#define PSYCHO_ICLR_A_SLOT0 0x1400UL
#define PSYCHO_ICLR_SCSI 0x1800UL
#define psycho_iclr_offset(ino) \
((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
(PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
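/* Worked decode (illustrative): ino 0x16 has bus bit 1 and slot 1, so
 * its map register is at PSYCHO_IMAP_B_SLOT0 + 8 = 0x0c28; onboard
 * ino 0x24 maps to PSYCHO_OBIO_IMAP_BASE + (4 << 3) = 0x1020 with its
 * clear register at PSYCHO_ICLR_SCSI + (4 << 3) = 0x1820.
 */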
static unsigned int psycho_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
unsigned long controller_regs = (unsigned long) _data;
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
ino &= 0x3f;
if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
/* PCI slot */
imap_off = psycho_pcislot_imap_offset(ino);
} else {
/* Onboard device */
imap_off = psycho_onboard_imap_offset(ino);
}
/* Now build the IRQ bucket. */
imap = controller_regs + imap_off;
iclr_off = psycho_iclr_offset(ino);
iclr = controller_regs + iclr_off;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
return build_irq(inofixup, iclr, imap);
}
static void __init psycho_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = psycho_irq_build;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) regs[2].phys_addr;
}
#define sabre_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
struct sabre_irq_data {
unsigned long controller_regs;
unsigned int pci_first_busno;
};
#define SABRE_CONFIGSPACE 0x001000000UL
#define SABRE_WRSYNC 0x1c20UL
#define SABRE_CONFIG_BASE(CONFIG_SPACE) \
(CONFIG_SPACE | (1UL << 24))
#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 16) | \
((unsigned long)(DEVFN) << 8) | \
((unsigned long)(REG)))
/* When a device lives behind a bridge deeper in the PCI bus topology
* than APB, a special sequence must run to make sure all pending DMA
* transfers at the time of IRQ delivery are visible to the cpu in
* the coherency domain. This sequence is to perform a read on the far
* side of the non-APB bridge, then perform a read of Sabre's DMA
* write-sync register.
*/
static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
unsigned int phys_hi = (unsigned int) (unsigned long) _arg1;
struct sabre_irq_data *irq_data = _arg2;
unsigned long controller_regs = irq_data->controller_regs;
unsigned long sync_reg = controller_regs + SABRE_WRSYNC;
unsigned long config_space = controller_regs + SABRE_CONFIGSPACE;
unsigned int bus, devfn;
u16 _unused;
config_space = SABRE_CONFIG_BASE(config_space);
bus = (phys_hi >> 16) & 0xff;
devfn = (phys_hi >> 8) & 0xff;
config_space |= SABRE_CONFIG_ENCODE(bus, devfn, 0x00);
__asm__ __volatile__("membar #Sync\n\t"
"lduha [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (_unused)
: "r" ((u16 *) config_space),
"i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
sabre_read(sync_reg);
}
#define SABRE_IMAP_A_SLOT0 0x0c00UL
#define SABRE_IMAP_B_SLOT0 0x0c20UL
#define SABRE_ICLR_A_SLOT0 0x1400UL
#define SABRE_ICLR_B_SLOT0 0x1480UL
#define SABRE_ICLR_SCSI 0x1800UL
#define SABRE_ICLR_ETH 0x1808UL
#define SABRE_ICLR_BPP 0x1810UL
#define SABRE_ICLR_AU_REC 0x1818UL
#define SABRE_ICLR_AU_PLAY 0x1820UL
#define SABRE_ICLR_PFAIL 0x1828UL
#define SABRE_ICLR_KMS 0x1830UL
#define SABRE_ICLR_FLPY 0x1838UL
#define SABRE_ICLR_SHW 0x1840UL
#define SABRE_ICLR_KBD 0x1848UL
#define SABRE_ICLR_MS 0x1850UL
#define SABRE_ICLR_SER 0x1858UL
#define SABRE_ICLR_UE 0x1870UL
#define SABRE_ICLR_CE 0x1878UL
#define SABRE_ICLR_PCIERR 0x1880UL
static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
{
unsigned int bus = (ino & 0x10) >> 4;
unsigned int slot = (ino & 0x0c) >> 2;
if (bus == 0)
return SABRE_IMAP_A_SLOT0 + (slot * 8);
else
return SABRE_IMAP_B_SLOT0 + (slot * 8);
}
#define SABRE_OBIO_IMAP_BASE 0x1000UL
#define SABRE_ONBOARD_IRQ_BASE 0x20
#define sabre_onboard_imap_offset(__ino) \
(SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
#define sabre_iclr_offset(ino) \
((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
(SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
static int sabre_device_needs_wsync(struct device_node *dp)
{
struct device_node *parent = dp->parent;
const char *parent_model, *parent_compat;
/* This traversal up towards the root is meant to
* handle two cases:
*
* 1) non-PCI bus sitting under PCI, such as 'ebus'
* 2) the PCI controller interrupts themselves, which
* will use the sabre_irq_build but do not need
* the DMA synchronization handling
*/
while (parent) {
if (of_node_is_type(parent, "pci"))
break;
parent = parent->parent;
}
if (!parent)
return 0;
parent_model = of_get_property(parent,
"model", NULL);
if (parent_model &&
(!strcmp(parent_model, "SUNW,sabre") ||
!strcmp(parent_model, "SUNW,simba")))
return 0;
parent_compat = of_get_property(parent,
"compatible", NULL);
if (parent_compat &&
(!strcmp(parent_compat, "pci108e,a000") ||
!strcmp(parent_compat, "pci108e,a001")))
return 0;
return 1;
}
static unsigned int sabre_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct sabre_irq_data *irq_data = _data;
unsigned long controller_regs = irq_data->controller_regs;
const struct linux_prom_pci_registers *regs;
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
int irq;
ino &= 0x3f;
if (ino < SABRE_ONBOARD_IRQ_BASE) {
/* PCI slot */
imap_off = sabre_pcislot_imap_offset(ino);
} else {
/* onboard device */
imap_off = sabre_onboard_imap_offset(ino);
}
/* Now build the IRQ bucket. */
imap = controller_regs + imap_off;
iclr_off = sabre_iclr_offset(ino);
iclr = controller_regs + iclr_off;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
irq = build_irq(inofixup, iclr, imap);
/* If the parent device is a PCI<->PCI bridge other than
* APB, we have to install a pre-handler to ensure that
* all pending DMA is drained before the interrupt handler
* is run.
*/
regs = of_get_property(dp, "reg", NULL);
if (regs && sabre_device_needs_wsync(dp)) {
irq_install_pre_handler(irq,
sabre_wsync_handler,
(void *) (long) regs->phys_hi,
(void *) irq_data);
}
return irq;
}
static void __init sabre_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
struct sabre_irq_data *irq_data;
const u32 *busrange;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sabre_irq_build;
irq_data = prom_early_alloc(sizeof(struct sabre_irq_data));
regs = of_get_property(dp, "reg", NULL);
irq_data->controller_regs = regs[0].phys_addr;
busrange = of_get_property(dp, "bus-range", NULL);
irq_data->pci_first_busno = busrange[0];
dp->irq_trans->data = irq_data;
}
/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
* imap/iclr registers are per-PBM.
*/
#define SCHIZO_IMAP_BASE 0x1000UL
#define SCHIZO_ICLR_BASE 0x1400UL
static unsigned long schizo_imap_offset(unsigned long ino)
{
return SCHIZO_IMAP_BASE + (ino * 8UL);
}
static unsigned long schizo_iclr_offset(unsigned long ino)
{
return SCHIZO_ICLR_BASE + (ino * 8UL);
}
static unsigned long schizo_ino_to_iclr(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + schizo_iclr_offset(ino);
}
static unsigned long schizo_ino_to_imap(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + schizo_imap_offset(ino);
}
#define schizo_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define schizo_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E) \
: "memory")
static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
unsigned long sync_reg = (unsigned long) _arg2;
u64 mask = 1UL << (ino & IMAP_INO);
u64 val;
int limit;
schizo_write(sync_reg, mask);
limit = 100000;
val = 0;
while (--limit) {
val = schizo_read(sync_reg);
if (!(val & mask))
break;
}
if (limit <= 0) {
printk("tomatillo_wsync_handler: DMA won't sync [%llx:%llx]\n",
val, mask);
}
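/* For the older Tomatillo revisions flagged by schizo_irq_build()
 * (chip_version <= 4, passed in as _arg1), additionally issue a
 * 64-byte block-commit store of %f0-%f15 to an aligned buffer,
 * saving and restoring %fprs around it. This looks like an errata
 * workaround to force outstanding writes out; the exact erratum is
 * not documented here.
 */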
if (_arg1) {
static unsigned char cacheline[64]
__attribute__ ((aligned (64)));
__asm__ __volatile__("rd %%fprs, %0\n\t"
"or %0, %4, %1\n\t"
"wr %1, 0x0, %%fprs\n\t"
"stda %%f0, [%5] %6\n\t"
"wr %0, 0x0, %%fprs\n\t"
"membar #Sync"
: "=&r" (mask), "=&r" (val)
: "0" (mask), "1" (val),
"i" (FPRS_FEF), "r" (&cacheline[0]),
"i" (ASI_BLK_COMMIT_P));
}
}
struct schizo_irq_data {
unsigned long pbm_regs;
unsigned long sync_reg;
u32 portid;
int chip_version;
};
static unsigned int schizo_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct schizo_irq_data *irq_data = _data;
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
int ign_fixup;
int irq;
int is_tomatillo;
ino &= 0x3f;
/* Now build the IRQ bucket. */
imap = schizo_ino_to_imap(pbm_regs, ino);
iclr = schizo_ino_to_iclr(pbm_regs, ino);
/* On Schizo, no inofixup occurs. This is because each
* INO has its own IMAP register. On Psycho and Sabre
* there is only one IMAP register for each PCI slot even
* though four different INOs can be generated by each
* PCI slot.
*
* But, for JBUS variants (essentially, Tomatillo), we have
* to fixup the lowest bit of the interrupt group number.
*/
ign_fixup = 0;
is_tomatillo = (irq_data->sync_reg != 0UL);
if (is_tomatillo) {
if (irq_data->portid & 1)
ign_fixup = (1 << 6);
}
irq = build_irq(ign_fixup, iclr, imap);
if (is_tomatillo) {
irq_install_pre_handler(irq,
tomatillo_wsync_handler,
((irq_data->chip_version <= 4) ?
(void *) 1 : (void *) 0),
(void *) irq_data->sync_reg);
}
return irq;
}
static void __init __schizo_irq_trans_init(struct device_node *dp,
int is_tomatillo)
{
const struct linux_prom64_registers *regs;
struct schizo_irq_data *irq_data;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = schizo_irq_build;
irq_data = prom_early_alloc(sizeof(struct schizo_irq_data));
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = irq_data;
irq_data->pbm_regs = regs[0].phys_addr;
if (is_tomatillo)
irq_data->sync_reg = regs[3].phys_addr + 0x1a18UL;
else
irq_data->sync_reg = 0UL;
irq_data->portid = of_getintprop_default(dp, "portid", 0);
irq_data->chip_version = of_getintprop_default(dp, "version#", 0);
}
static void __init schizo_irq_trans_init(struct device_node *dp)
{
__schizo_irq_trans_init(dp, 0);
}
static void __init tomatillo_irq_trans_init(struct device_node *dp)
{
__schizo_irq_trans_init(dp, 1);
}
static unsigned int pci_sun4v_irq_build(struct device_node *dp,
unsigned int devino,
void *_data)
{
u32 devhandle = (u32) (unsigned long) _data;
return sun4v_build_irq(devhandle, devino);
}
static void __init pci_sun4v_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = pci_sun4v_irq_build;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) (unsigned long)
((regs->phys_addr >> 32UL) & 0x0fffffff);
}
struct fire_irq_data {
unsigned long pbm_regs;
u32 portid;
};
#define FIRE_IMAP_BASE 0x001000
#define FIRE_ICLR_BASE 0x001400
static unsigned long fire_imap_offset(unsigned long ino)
{
return FIRE_IMAP_BASE + (ino * 8UL);
}
static unsigned long fire_iclr_offset(unsigned long ino)
{
return FIRE_ICLR_BASE + (ino * 8UL);
}
static unsigned long fire_ino_to_iclr(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + fire_iclr_offset(ino);
}
static unsigned long fire_ino_to_imap(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + fire_imap_offset(ino);
}
static unsigned int fire_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct fire_irq_data *irq_data = _data;
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
unsigned long int_ctrlr;
ino &= 0x3f;
/* Now build the IRQ bucket. */
imap = fire_ino_to_imap(pbm_regs, ino);
iclr = fire_ino_to_iclr(pbm_regs, ino);
/* Set the interrupt controller number. */
int_ctrlr = 1 << 6;
upa_writeq(int_ctrlr, imap);
/* The interrupt map registers do not have an INO field
* like other chips do. They return zero in the INO
* field, and the interrupt controller number is controlled
* in bits 6 to 9. So in order for build_irq() to get
* the INO right we pass it in as part of the fixup
* which will get added to the map register zero value
* read by build_irq().
*/
ino |= (irq_data->portid << 6);
ino -= int_ctrlr;
return build_irq(ino, iclr, imap);
}
static void __init fire_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
struct fire_irq_data *irq_data;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = fire_irq_build;
irq_data = prom_early_alloc(sizeof(struct fire_irq_data));
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = irq_data;
irq_data->pbm_regs = regs[0].phys_addr;
irq_data->portid = of_getintprop_default(dp, "portid", 0);
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_SBUS
/* INO number to IMAP register offset for SYSIO external IRQ's.
* This should conform to both Sunfire/Wildfire server and Fusion
* desktop designs.
*/
#define SYSIO_IMAP_SLOT0 0x2c00UL
#define SYSIO_IMAP_SLOT1 0x2c08UL
#define SYSIO_IMAP_SLOT2 0x2c10UL
#define SYSIO_IMAP_SLOT3 0x2c18UL
#define SYSIO_IMAP_SCSI 0x3000UL
#define SYSIO_IMAP_ETH 0x3008UL
#define SYSIO_IMAP_BPP 0x3010UL
#define SYSIO_IMAP_AUDIO 0x3018UL
#define SYSIO_IMAP_PFAIL 0x3020UL
#define SYSIO_IMAP_KMS 0x3028UL
#define SYSIO_IMAP_FLPY 0x3030UL
#define SYSIO_IMAP_SHW 0x3038UL
#define SYSIO_IMAP_KBD 0x3040UL
#define SYSIO_IMAP_MS 0x3048UL
#define SYSIO_IMAP_SER 0x3050UL
#define SYSIO_IMAP_TIM0 0x3060UL
#define SYSIO_IMAP_TIM1 0x3068UL
#define SYSIO_IMAP_UE 0x3070UL
#define SYSIO_IMAP_CE 0x3078UL
#define SYSIO_IMAP_SBERR 0x3080UL
#define SYSIO_IMAP_PMGMT 0x3088UL
#define SYSIO_IMAP_GFX 0x3090UL
#define SYSIO_IMAP_EUPA 0x3098UL
#define bogon ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
/* SBUS Slot 0 --> 3, level 1 --> 7 */
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
/* Onboard devices (not relevant/used on SunFire). */
SYSIO_IMAP_SCSI,
SYSIO_IMAP_ETH,
SYSIO_IMAP_BPP,
bogon,
SYSIO_IMAP_AUDIO,
SYSIO_IMAP_PFAIL,
bogon,
bogon,
SYSIO_IMAP_KMS,
SYSIO_IMAP_FLPY,
SYSIO_IMAP_SHW,
SYSIO_IMAP_KBD,
SYSIO_IMAP_MS,
SYSIO_IMAP_SER,
bogon,
bogon,
SYSIO_IMAP_TIM0,
SYSIO_IMAP_TIM1,
bogon,
bogon,
SYSIO_IMAP_UE,
SYSIO_IMAP_CE,
SYSIO_IMAP_SBERR,
SYSIO_IMAP_PMGMT,
SYSIO_IMAP_GFX,
SYSIO_IMAP_EUPA,
};
#undef bogon
#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
/* Convert Interrupt Mapping register pointer to associated
* Interrupt Clear register pointer, SYSIO specific version.
*/
#define SYSIO_ICLR_UNUSED0 0x3400UL
#define SYSIO_ICLR_SLOT0 0x3408UL
#define SYSIO_ICLR_SLOT1 0x3448UL
#define SYSIO_ICLR_SLOT2 0x3488UL
#define SYSIO_ICLR_SLOT3 0x34c8UL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
return imap + diff;
}
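/* Worked example (illustrative): the fixed IMAP->ICLR distance is
 * 0x3400 - 0x2c00 = 0x800, so SYSIO_IMAP_SCSI at 0x3000 pairs with a
 * clear register at 0x3800.
 */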
static unsigned int sbus_of_build_irq(struct device_node *dp,
unsigned int ino,
void *_data)
{
unsigned long reg_base = (unsigned long) _data;
const struct linux_prom_registers *regs;
unsigned long imap, iclr;
int sbus_slot = 0;
int sbus_level = 0;
ino &= 0x3f;
regs = of_get_property(dp, "reg", NULL);
if (regs)
sbus_slot = regs->which_io;
if (ino < 0x20)
ino += (sbus_slot * 8);
imap = sysio_irq_offsets[ino];
if (imap == ((unsigned long)-1)) {
prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
ino);
prom_halt();
}
imap += reg_base;
/* SYSIO inconsistency. For external SLOTS, we have to select
* the right ICLR register based upon the lower SBUS irq level
* bits.
*/
if (ino >= 0x20) {
iclr = sysio_imap_to_iclr(imap);
} else {
sbus_level = ino & 0x7;
switch(sbus_slot) {
case 0:
iclr = reg_base + SYSIO_ICLR_SLOT0;
break;
case 1:
iclr = reg_base + SYSIO_ICLR_SLOT1;
break;
case 2:
iclr = reg_base + SYSIO_ICLR_SLOT2;
break;
default:
case 3:
iclr = reg_base + SYSIO_ICLR_SLOT3;
break;
}
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
return build_irq(sbus_level, iclr, imap);
}
static void __init sbus_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sbus_of_build_irq;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) (unsigned long) regs->phys_addr;
}
#endif /* CONFIG_SBUS */
static unsigned int central_build_irq(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct device_node *central_dp = _data;
struct platform_device *central_op = of_find_device_by_node(central_dp);
struct resource *res;
unsigned long imap, iclr;
u32 tmp;
if (of_node_name_eq(dp, "eeprom")) {
res = &central_op->resource[5];
} else if (of_node_name_eq(dp, "zs")) {
res = &central_op->resource[4];
} else if (of_node_name_eq(dp, "clock-board")) {
res = &central_op->resource[3];
} else {
return ino;
}
imap = res->start + 0x00UL;
iclr = res->start + 0x10UL;
/* Set the INO state to idle, and disable. */
upa_writel(0, iclr);
upa_readl(iclr);
tmp = upa_readl(imap);
tmp &= ~0x80000000;
upa_writel(tmp, imap);
return build_irq(0, iclr, imap);
}
static void __init central_irq_trans_init(struct device_node *dp)
{
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = central_build_irq;
dp->irq_trans->data = dp;
}
struct irq_trans {
const char *name;
void (*init)(struct device_node *);
};
#ifdef CONFIG_PCI
static struct irq_trans __initdata pci_irq_trans_table[] = {
{ "SUNW,sabre", sabre_irq_trans_init },
{ "pci108e,a000", sabre_irq_trans_init },
{ "pci108e,a001", sabre_irq_trans_init },
{ "SUNW,psycho", psycho_irq_trans_init },
{ "pci108e,8000", psycho_irq_trans_init },
{ "SUNW,schizo", schizo_irq_trans_init },
{ "pci108e,8001", schizo_irq_trans_init },
{ "SUNW,schizo+", schizo_irq_trans_init },
{ "pci108e,8002", schizo_irq_trans_init },
{ "SUNW,tomatillo", tomatillo_irq_trans_init },
{ "pci108e,a801", tomatillo_irq_trans_init },
{ "SUNW,sun4v-pci", pci_sun4v_irq_trans_init },
{ "pciex108e,80f0", fire_irq_trans_init },
};
#endif
static unsigned int sun4v_vdev_irq_build(struct device_node *dp,
unsigned int devino,
void *_data)
{
u32 devhandle = (u32) (unsigned long) _data;
return sun4v_build_irq(devhandle, devino);
}
static void __init sun4v_vdev_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sun4v_vdev_irq_build;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) (unsigned long)
((regs->phys_addr >> 32UL) & 0x0fffffff);
}
void __init irq_trans_init(struct device_node *dp)
{
#ifdef CONFIG_PCI
const char *model;
int i;
#endif
#ifdef CONFIG_PCI
model = of_get_property(dp, "model", NULL);
if (!model)
model = of_get_property(dp, "compatible", NULL);
if (model) {
for (i = 0; i < ARRAY_SIZE(pci_irq_trans_table); i++) {
struct irq_trans *t = &pci_irq_trans_table[i];
if (!strcmp(model, t->name)) {
t->init(dp);
return;
}
}
}
#endif
#ifdef CONFIG_SBUS
if (of_node_name_eq(dp, "sbus") ||
of_node_name_eq(dp, "sbi")) {
sbus_irq_trans_init(dp);
return;
}
#endif
if (of_node_name_eq(dp, "fhc") &&
of_node_name_eq(dp->parent, "central")) {
central_irq_trans_init(dp);
return;
}
if (of_node_name_eq(dp, "virtual-devices") ||
of_node_name_eq(dp, "niu")) {
sun4v_vdev_irq_trans_init(dp);
return;
}
}
| linux-master | arch/sparc/kernel/prom_irqtrans.c |
// SPDX-License-Identifier: GPL-2.0
/* windows.c: Routines to deal with register window management
* at the C-code level.
*
* Copyright (C) 1995 David S. Miller ([email protected])
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include "kernel.h"
/* Do save's until all user register windows are out of the cpu. */
void flush_user_windows(void)
{
register int ctr asm("g5");
ctr = 0;
__asm__ __volatile__(
"\n1:\n\t"
"ld [%%g6 + %2], %%g4\n\t"
"orcc %%g0, %%g4, %%g0\n\t"
"add %0, 1, %0\n\t"
"bne 1b\n\t"
" save %%sp, -64, %%sp\n"
"2:\n\t"
"subcc %0, 1, %0\n\t"
"bne 2b\n\t"
" restore %%g0, %%g0, %%g0\n"
: "=&r" (ctr)
: "0" (ctr),
"i" ((const unsigned long)TI_UWINMASK)
: "g4", "cc");
}
static inline void shift_window_buffer(int first_win, int last_win, struct thread_info *tp)
{
int i;
for(i = first_win; i < last_win; i++) {
tp->rwbuf_stkptrs[i] = tp->rwbuf_stkptrs[i+1];
memcpy(&tp->reg_window[i], &tp->reg_window[i+1], sizeof(struct reg_window32));
}
}
/* Place as many of the user's current register windows
* on the stack as we can. Even if the %sp is unaligned
* we still copy the window there; the only case where we don't
* succeed is if the %sp points to a bum mapping altogether.
* setup_frame() and do_sigreturn() use this before shifting
* the user stack around. Future instruction and hardware
* bug workaround routines will need this functionality as
* well.
*/
void synchronize_user_stack(void)
{
struct thread_info *tp = current_thread_info();
int window;
flush_user_windows();
if(!tp->w_saved)
return;
/* Ok, there is some dirty work to do. */
for(window = tp->w_saved - 1; window >= 0; window--) {
unsigned long sp = tp->rwbuf_stkptrs[window];
/* Ok, let it rip. */
if (copy_to_user((char __user *) sp, &tp->reg_window[window],
sizeof(struct reg_window32)))
continue;
shift_window_buffer(window, tp->w_saved - 1, tp);
tp->w_saved--;
}
}
#if 0
/* An optimization. */
static inline void copy_aligned_window(void *dest, const void *src)
{
__asm__ __volatile__("ldd [%1], %%g2\n\t"
"ldd [%1 + 0x8], %%g4\n\t"
"std %%g2, [%0]\n\t"
"std %%g4, [%0 + 0x8]\n\t"
"ldd [%1 + 0x10], %%g2\n\t"
"ldd [%1 + 0x18], %%g4\n\t"
"std %%g2, [%0 + 0x10]\n\t"
"std %%g4, [%0 + 0x18]\n\t"
"ldd [%1 + 0x20], %%g2\n\t"
"ldd [%1 + 0x28], %%g4\n\t"
"std %%g2, [%0 + 0x20]\n\t"
"std %%g4, [%0 + 0x28]\n\t"
"ldd [%1 + 0x30], %%g2\n\t"
"ldd [%1 + 0x38], %%g4\n\t"
"std %%g2, [%0 + 0x30]\n\t"
"std %%g4, [%0 + 0x38]\n\t" : :
"r" (dest), "r" (src) :
"g2", "g3", "g4", "g5");
}
#endif
/* Try to push the windows in a thread's window buffer to the
* user stack. Unaligned %sp's are not allowed here.
*/
void try_to_clear_window_buffer(struct pt_regs *regs, int who)
{
struct thread_info *tp = current_thread_info();
int window;
flush_user_windows();
for(window = 0; window < tp->w_saved; window++) {
unsigned long sp = tp->rwbuf_stkptrs[window];
if ((sp & 7) ||
copy_to_user((char __user *) sp, &tp->reg_window[window],
sizeof(struct reg_window32))) {
force_exit_sig(SIGILL);
return;
}
}
tp->w_saved = 0;
}
| linux-master | arch/sparc/kernel/windows.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/sigcontext.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/switch_to.h>
#include "sigutil.h"
int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err = 0;
#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
put_psr(get_psr() | PSR_EF);
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
&current->thread.fpqueue[0], &current->thread.fpqdepth);
regs->psr &= ~(PSR_EF);
clear_tsk_thread_flag(current, TIF_USEDFPU);
}
#else
if (current == last_task_used_math) {
put_psr(get_psr() | PSR_EF);
fpsave(&current->thread.float_regs[0], &current->thread.fsr,
&current->thread.fpqueue[0], &current->thread.fpqdepth);
last_task_used_math = NULL;
regs->psr &= ~(PSR_EF);
}
#endif
err |= __copy_to_user(&fpu->si_float_regs[0],
&current->thread.float_regs[0],
(sizeof(unsigned long) * 32));
err |= __put_user(current->thread.fsr, &fpu->si_fsr);
err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
if (current->thread.fpqdepth != 0)
err |= __copy_to_user(&fpu->si_fpqueue[0],
&current->thread.fpqueue[0],
((sizeof(unsigned long) +
(sizeof(unsigned long *)))*16));
clear_used_math();
return err;
}
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err;
if (((unsigned long) fpu) & 3)
return -EFAULT;
#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU))
regs->psr &= ~PSR_EF;
#else
if (current == last_task_used_math) {
last_task_used_math = NULL;
regs->psr &= ~PSR_EF;
}
#endif
set_used_math();
clear_tsk_thread_flag(current, TIF_USEDFPU);
if (!access_ok(fpu, sizeof(*fpu)))
return -EFAULT;
err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
(sizeof(unsigned long) * 32));
err |= __get_user(current->thread.fsr, &fpu->si_fsr);
err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
if (current->thread.fpqdepth != 0)
err |= __copy_from_user(&current->thread.fpqueue[0],
&fpu->si_fpqueue[0],
((sizeof(unsigned long) +
(sizeof(unsigned long *)))*16));
return err;
}
int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
{
int i, err = __put_user(wsaved, &rwin->wsaved);
for (i = 0; i < wsaved; i++) {
struct reg_window32 *rp;
unsigned long fp;
rp = &current_thread_info()->reg_window[i];
fp = current_thread_info()->rwbuf_stkptrs[i];
err |= copy_to_user(&rwin->reg_window[i], rp,
sizeof(struct reg_window32));
err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
}
return err;
}
int restore_rwin_state(__siginfo_rwin_t __user *rp)
{
struct thread_info *t = current_thread_info();
int i, wsaved, err;
if (((unsigned long) rp) & 3)
return -EFAULT;
get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
err = 0;
for (i = 0; i < wsaved; i++) {
err |= copy_from_user(&t->reg_window[i],
&rp->reg_window[i],
sizeof(struct reg_window32));
err |= __get_user(t->rwbuf_stkptrs[i],
&rp->rwbuf_stkptrs[i]);
}
if (err)
return err;
t->w_saved = wsaved;
synchronize_user_stack();
if (t->w_saved)
return -EFAULT;
return 0;
}
| linux-master | arch/sparc/kernel/sigutil_32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SS1000/SC2000 interrupt handling.
*
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
* Heavily based on arch/sparc/kernel/irq.c.
*/
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <asm/timer.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/oplib.h>
#include "kernel.h"
#include "irq.h"
/* Sun4d interrupts fall roughly into two categories: SBUS and
* cpu local. CPU local interrupts cover the timer interrupts
* and whatnot, and we encode those as normal PILs between
* 0 and 15.
* SBUS interrupts are encoded as a combination of board, level and slot.
*/
struct sun4d_handler_data {
unsigned int cpuid; /* target cpu */
unsigned int real_irq; /* interrupt level */
};
static unsigned int sun4d_encode_irq(int board, int lvl, int slot)
{
return (board + 1) << 5 | (lvl << 2) | slot;
}
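/* Worked example (illustrative): board 2, SBUS level 3, slot 1 encodes
 * to ((2 + 1) << 5) | (3 << 2) | 1 = 0x6d.
 */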
struct sun4d_timer_regs {
u32 l10_timer_limit;
u32 l10_cur_countx;
u32 l10_limit_noclear;
u32 ctrl;
u32 l10_cur_count;
};
static struct sun4d_timer_regs __iomem *sun4d_timers;
#define SUN4D_TIMER_IRQ 10
/* Specify which cpu handles interrupts from which board.
* Index is board - value is cpu.
*/
static unsigned char board_to_cpu[32];
static int pil_to_sbus[] = {
0,
0,
1,
2,
0,
3,
0,
4,
0,
5,
0,
6,
0,
7,
0,
0,
};
/* Exported for sun4d_smp.c */
DEFINE_SPINLOCK(sun4d_imsk_lock);
/* SBUS interrupts are encoded as integers including the board number
* (plus one), the SBUS level, and the SBUS slot number. Sun4D
* IRQ dispatch is done by:
*
* 1) Reading the BW local interrupt table in order to get the bus
* interrupt mask.
*
* This table is indexed by SBUS interrupt level which can be
* derived from the PIL we got interrupted on.
*
* 2) For each bus showing interrupt pending from #1, read the
* SBI interrupt state register. This will indicate which slots
* have interrupts pending for that SBUS interrupt level.
*
* 3) Call the generic IRQ support.
*/
static void sun4d_sbus_handler_irq(int sbusl)
{
unsigned int bus_mask;
unsigned int sbino, slot;
unsigned int sbil;
bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
bw_clear_intr_mask(sbusl, bus_mask);
sbil = (sbusl << 2);
/* Loop for each pending SBI */
for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) {
unsigned int idx, mask;
if (!(bus_mask & 1))
continue;
/* XXX This seems to ACK the irq twice. acquire_sbi()
* XXX uses swap, therefore this writes 0xf << sbil,
* XXX then later release_sbi() will write the individual
* XXX bits which were set again.
*/
mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
mask &= (0xf << sbil);
/* Loop for each pending SBI slot */
slot = (1 << sbil);
for (idx = 0; mask != 0; idx++, slot <<= 1) {
unsigned int pil;
struct irq_bucket *p;
if (!(mask & slot))
continue;
mask &= ~slot;
pil = sun4d_encode_irq(sbino, sbusl, idx);
p = irq_map[pil];
while (p) {
struct irq_bucket *next;
next = p->next;
generic_handle_irq(p->irq);
p = next;
}
release_sbi(SBI2DEVID(sbino), slot);
}
}
}
void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
/* SBUS IRQ level (1 - 7) */
int sbusl = pil_to_sbus[pil];
/* FIXME: Is this necessary?? */
cc_get_ipen();
cc_set_iclr(1 << pil);
#ifdef CONFIG_SMP
/*
* Check IPI data structures after IRQ has been cleared. Hard and Soft
* IRQ can happen at the same time, so both cases are always handled.
*/
if (pil == SUN4D_IPI_IRQ)
sun4d_ipi_interrupt();
#endif
old_regs = set_irq_regs(regs);
irq_enter();
if (sbusl == 0) {
/* cpu interrupt */
struct irq_bucket *p;
p = irq_map[pil];
while (p) {
struct irq_bucket *next;
next = p->next;
generic_handle_irq(p->irq);
p = next;
}
} else {
/* SBUS interrupt */
sun4d_sbus_handler_irq(sbusl);
}
irq_exit();
set_irq_regs(old_regs);
}
static void sun4d_mask_irq(struct irq_data *data)
{
struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
unsigned int real_irq;
#ifdef CONFIG_SMP
int cpuid = handler_data->cpuid;
unsigned long flags;
#endif
real_irq = handler_data->real_irq;
#ifdef CONFIG_SMP
spin_lock_irqsave(&sun4d_imsk_lock, flags);
cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq));
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
cc_set_imsk(cc_get_imsk() | (1 << real_irq));
#endif
}
static void sun4d_unmask_irq(struct irq_data *data)
{
struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
unsigned int real_irq;
#ifdef CONFIG_SMP
int cpuid = handler_data->cpuid;
unsigned long flags;
#endif
real_irq = handler_data->real_irq;
#ifdef CONFIG_SMP
spin_lock_irqsave(&sun4d_imsk_lock, flags);
cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
#endif
}
static unsigned int sun4d_startup_irq(struct irq_data *data)
{
irq_link(data->irq);
sun4d_unmask_irq(data);
return 0;
}
static void sun4d_shutdown_irq(struct irq_data *data)
{
sun4d_mask_irq(data);
irq_unlink(data->irq);
}
static struct irq_chip sun4d_irq = {
.name = "sun4d",
.irq_startup = sun4d_startup_irq,
.irq_shutdown = sun4d_shutdown_irq,
.irq_unmask = sun4d_unmask_irq,
.irq_mask = sun4d_mask_irq,
};
#ifdef CONFIG_SMP
/* Setup IRQ distribution scheme. */
void __init sun4d_distribute_irqs(void)
{
struct device_node *dp;
int cpuid = cpu_logical_map(1);
if (cpuid == -1)
cpuid = cpu_logical_map(0);
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
board_to_cpu[board] = cpuid;
set_sbi_tid(devid, cpuid << 3);
}
printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid);
}
#endif
static void sun4d_clear_clock_irq(void)
{
sbus_readl(&sun4d_timers->l10_timer_limit);
}
static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
unsigned int value = limit ? timer_value(limit) : 0;
bw_set_prof_limit(cpu, value);
}
static void __init sun4d_load_profile_irqs(void)
{
int cpu = 0, mid;
while (!cpu_find_by_instance(cpu, NULL, &mid)) {
sun4d_load_profile_irq(mid >> 3, 0);
cpu++;
}
}
static unsigned int _sun4d_build_device_irq(unsigned int real_irq,
unsigned int pil,
unsigned int board)
{
struct sun4d_handler_data *handler_data;
unsigned int irq;
irq = irq_alloc(real_irq, pil);
if (irq == 0) {
prom_printf("IRQ: allocate for %d %d %d failed\n",
real_irq, pil, board);
goto err_out;
}
handler_data = irq_get_handler_data(irq);
if (unlikely(handler_data))
goto err_out;
handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
prom_halt();
}
handler_data->cpuid = board_to_cpu[board];
handler_data->real_irq = real_irq;
irq_set_chip_and_handler_name(irq, &sun4d_irq,
handle_level_irq, "level");
irq_set_handler_data(irq, handler_data);
err_out:
return irq;
}
static unsigned int sun4d_build_device_irq(struct platform_device *op,
unsigned int real_irq)
{
struct device_node *dp = op->dev.of_node;
struct device_node *board_parent, *bus = dp->parent;
char *bus_connection;
const struct linux_prom_registers *regs;
unsigned int pil;
unsigned int irq;
int board, slot;
int sbusl;
irq = real_irq;
while (bus) {
if (of_node_name_eq(bus, "sbi")) {
bus_connection = "io-unit";
break;
}
if (of_node_name_eq(bus, "bootbus")) {
bus_connection = "cpu-unit";
break;
}
bus = bus->parent;
}
if (!bus)
goto err_out;
regs = of_get_property(dp, "reg", NULL);
if (!regs)
goto err_out;
slot = regs->which_io;
/*
 * If the bus node's parent is not io-unit/cpu-unit, or the
 * io-unit/cpu-unit lacks a "board#" property, something is very wrong.
*/
if (!of_node_name_eq(bus->parent, bus_connection)) {
printk(KERN_ERR "%pOF: Error, parent is not %s.\n",
bus, bus_connection);
goto err_out;
}
board_parent = bus->parent;
board = of_getintprop_default(board_parent, "board#", -1);
if (board == -1) {
printk(KERN_ERR "%pOF: Error, lacks board# property.\n",
board_parent);
goto err_out;
}
sbusl = pil_to_sbus[real_irq];
if (sbusl)
pil = sun4d_encode_irq(board, sbusl, slot);
else
pil = real_irq;
irq = _sun4d_build_device_irq(real_irq, pil, board);
err_out:
return irq;
}
static unsigned int sun4d_build_timer_irq(unsigned int board,
unsigned int real_irq)
{
return _sun4d_build_device_irq(real_irq, real_irq, board);
}
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
unsigned long flags;
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
/* Adjust so that we jump directly to smp4d_ticker */
lvl14_save[2] += smp4d_ticker - real_irq_entry;
/* For SMP we use the level 14 ticker, however the bootup code
* has copied the firmware's level 14 vector into the boot cpu's
* trap table, we must fix this now or we get squashed.
*/
local_irq_save(flags);
patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
trap_table->inst_one = lvl14_save[0];
trap_table->inst_two = lvl14_save[1];
trap_table->inst_three = lvl14_save[2];
trap_table->inst_four = lvl14_save[3];
local_ops->cache_all();
local_irq_restore(flags);
#endif
}
static void __init sun4d_init_timers(void)
{
struct device_node *dp;
struct resource res;
unsigned int irq;
const u32 *reg;
int err;
int board;
dp = of_find_node_by_name(NULL, "cpu-unit");
if (!dp) {
prom_printf("sun4d_init_timers: Unable to find cpu-unit\n");
prom_halt();
}
/* Which cpu-unit we use is arbitrary; we can view the bootbus timer
* registers via any cpu's mapping. The first 'reg' property is the
* bootbus.
*/
reg = of_get_property(dp, "reg", NULL);
if (!reg) {
prom_printf("sun4d_init_timers: No reg property\n");
prom_halt();
}
board = of_getintprop_default(dp, "board#", -1);
if (board == -1) {
prom_printf("sun4d_init_timers: No board# property on cpu-unit\n");
prom_halt();
}
of_node_put(dp);
res.start = reg[1];
res.end = reg[2] - 1;
res.flags = reg[0] & 0xff;
sun4d_timers = of_ioremap(&res, BW_TIMER_LIMIT,
sizeof(struct sun4d_timer_regs), "user timer");
if (!sun4d_timers) {
prom_printf("sun4d_init_timers: Can't map timer regs\n");
prom_halt();
}
#ifdef CONFIG_SMP
sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */
#else
sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */
sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
sparc_config.features |= FEAT_L10_CLOCKSOURCE;
sbus_writel(timer_value(sparc_config.cs_period),
&sun4d_timers->l10_timer_limit);
master_l10_counter = &sun4d_timers->l10_cur_count;
irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
if (err) {
prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
err);
prom_halt();
}
sun4d_load_profile_irqs();
sun4d_fixup_trap_table();
}
void __init sun4d_init_sbi_irq(void)
{
struct device_node *dp;
int target_cpu;
target_cpu = boot_cpu_id;
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
unsigned int mask;
set_sbi_tid(devid, target_cpu << 3);
board_to_cpu[board] = target_cpu;
/* Get rid of pending irqs from PROM */
mask = acquire_sbi(devid, 0xffffffff);
if (mask) {
printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n",
mask, board);
release_sbi(devid, mask);
}
}
}
void __init sun4d_init_IRQ(void)
{
local_irq_disable();
sparc_config.init_timers = sun4d_init_timers;
sparc_config.build_device_irq = sun4d_build_device_irq;
sparc_config.clock_rate = SBUS_CLOCK_RATE;
sparc_config.clear_clock_irq = sun4d_clear_clock_irq;
sparc_config.load_profile_irq = sun4d_load_profile_irq;
/* Cannot enable interrupts until OBP ticker is disabled. */
}
| linux-master | arch/sparc/kernel/sun4d_irq.c |
// SPDX-License-Identifier: GPL-2.0
/* ebus.c: EBUS DMA library code.
*
* Copyright (C) 1997 Eddie C. Dost ([email protected])
* Copyright (C) 1999 David S. Miller ([email protected])
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/ebus_dma.h>
#include <asm/io.h>
#define EBDMA_CSR 0x00UL /* Control/Status */
#define EBDMA_ADDR 0x04UL /* DMA Address */
#define EBDMA_COUNT 0x08UL /* DMA Count */
#define EBDMA_CSR_INT_PEND 0x00000001
#define EBDMA_CSR_ERR_PEND 0x00000002
#define EBDMA_CSR_DRAIN 0x00000004
#define EBDMA_CSR_INT_EN 0x00000010
#define EBDMA_CSR_RESET 0x00000080
#define EBDMA_CSR_WRITE 0x00000100
#define EBDMA_CSR_EN_DMA 0x00000200
#define EBDMA_CSR_CYC_PEND 0x00000400
#define EBDMA_CSR_DIAG_RD_DONE 0x00000800
#define EBDMA_CSR_DIAG_WR_DONE 0x00001000
#define EBDMA_CSR_EN_CNT 0x00002000
#define EBDMA_CSR_TC 0x00004000
#define EBDMA_CSR_DIS_CSR_DRN 0x00010000
#define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
#define EBDMA_CSR_BURST_SZ_1 0x00080000
#define EBDMA_CSR_BURST_SZ_4 0x00000000
#define EBDMA_CSR_BURST_SZ_8 0x00040000
#define EBDMA_CSR_BURST_SZ_16 0x000c0000
#define EBDMA_CSR_DIAG_EN 0x00100000
#define EBDMA_CSR_DIS_ERR_PEND 0x00400000
#define EBDMA_CSR_TCI_DIS 0x00800000
#define EBDMA_CSR_EN_NEXT 0x01000000
#define EBDMA_CSR_DMA_ON 0x02000000
#define EBDMA_CSR_A_LOADED 0x04000000
#define EBDMA_CSR_NA_LOADED 0x08000000
#define EBDMA_CSR_DEV_ID_MASK 0xf0000000
#define EBUS_DMA_RESET_TIMEOUT 10000
static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
{
int i;
u32 val = 0;
writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
udelay(1);
if (no_drain)
return;
for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
val = readl(p->regs + EBDMA_CSR);
if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
break;
udelay(10);
}
}
static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
{
struct ebus_dma_info *p = dev_id;
unsigned long flags;
u32 csr = 0;
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
if (csr & EBDMA_CSR_ERR_PEND) {
printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
return IRQ_HANDLED;
} else if (csr & EBDMA_CSR_INT_PEND) {
p->callback(p,
(csr & EBDMA_CSR_TC) ?
EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
p->client_cookie);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int ebus_dma_register(struct ebus_dma_info *p)
{
u32 csr;
if (!p->regs)
return -EINVAL;
if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
EBUS_DMA_FLAG_TCI_DISABLE))
return -EINVAL;
if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
return -EINVAL;
if (!strlen(p->name))
return -EINVAL;
__ebus_dma_reset(p, 1);
csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
csr |= EBDMA_CSR_TCI_DIS;
writel(csr, p->regs + EBDMA_CSR);
return 0;
}
EXPORT_SYMBOL(ebus_dma_register);
int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
{
unsigned long flags;
u32 csr;
if (on) {
if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
return -EBUSY;
}
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
csr |= EBDMA_CSR_INT_EN;
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
} else {
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
csr &= ~EBDMA_CSR_INT_EN;
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
free_irq(p->irq, p);
}
}
return 0;
}
EXPORT_SYMBOL(ebus_dma_irq_enable);
void ebus_dma_unregister(struct ebus_dma_info *p)
{
unsigned long flags;
u32 csr;
int irq_on = 0;
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
if (csr & EBDMA_CSR_INT_EN) {
csr &= ~EBDMA_CSR_INT_EN;
writel(csr, p->regs + EBDMA_CSR);
irq_on = 1;
}
spin_unlock_irqrestore(&p->lock, flags);
if (irq_on)
free_irq(p->irq, p);
}
EXPORT_SYMBOL(ebus_dma_unregister);
int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
{
unsigned long flags;
u32 csr;
int err;
if (len >= (1 << 24))
return -EINVAL;
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
err = -EINVAL;
if (!(csr & EBDMA_CSR_EN_DMA))
goto out;
err = -EBUSY;
if (csr & EBDMA_CSR_NA_LOADED)
goto out;
writel(len, p->regs + EBDMA_COUNT);
writel(bus_addr, p->regs + EBDMA_ADDR);
err = 0;
out:
spin_unlock_irqrestore(&p->lock, flags);
return err;
}
EXPORT_SYMBOL(ebus_dma_request);
void ebus_dma_prepare(struct ebus_dma_info *p, int write)
{
unsigned long flags;
u32 csr;
spin_lock_irqsave(&p->lock, flags);
__ebus_dma_reset(p, 0);
csr = (EBDMA_CSR_INT_EN |
EBDMA_CSR_EN_CNT |
EBDMA_CSR_BURST_SZ_16 |
EBDMA_CSR_EN_NEXT);
if (write)
csr |= EBDMA_CSR_WRITE;
if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
csr |= EBDMA_CSR_TCI_DIS;
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_prepare);
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
void ebus_dma_enable(struct ebus_dma_info *p, int on)
{
unsigned long flags;
u32 orig_csr, csr;
spin_lock_irqsave(&p->lock, flags);
orig_csr = csr = readl(p->regs + EBDMA_CSR);
if (on)
csr |= EBDMA_CSR_EN_DMA;
else
csr &= ~EBDMA_CSR_EN_DMA;
if ((orig_csr & EBDMA_CSR_EN_DMA) !=
(csr & EBDMA_CSR_EN_DMA))
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_enable);
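/* A minimal sketch of how a client driver might drive the API above,
 * assuming 'p' arrives with regs, irq, name and lock already
 * initialized by the caller; the function and callback names here are
 * illustrative, not part of this file.
 */
static void example_dma_callback(struct ebus_dma_info *p, int event,
				 void *cookie)
{
	/* EBUS_DMA_EVENT_DMA, _DEVICE or _ERROR arrives here */
}

static int example_dma_start(struct ebus_dma_info *p, dma_addr_t buf,
			     size_t len)
{
	int err;

	p->flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
	p->callback = example_dma_callback;

	err = ebus_dma_register(p);	/* program the initial CSR */
	if (err)
		return err;
	err = ebus_dma_irq_enable(p, 1);	/* request irq, set INT_EN */
	if (err)
		return err;
	ebus_dma_prepare(p, 0);		/* reset engine, direction = read */
	ebus_dma_enable(p, 1);		/* EN_DMA must be set before request */
	err = ebus_dma_request(p, buf, len);	/* queue address + count */
	if (err)
		ebus_dma_enable(p, 0);
	return err;
}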
| linux-master | arch/sparc/kernel/ebus.c |
// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support.
*
* Copyright (C) 1997, 2007, 2008 David S. Miller ([email protected])
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>
#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>
#include "cpumap.h"
#include "kernel.h"
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = CPU_MASK_NONE };
cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS - 1] = CPU_MASK_NONE };
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask;
static DEFINE_PER_CPU(bool, poke);
static bool cpu_poke;
void smp_info(struct seq_file *m)
{
int i;
seq_printf(m, "State:\n");
for_each_online_cpu(i)
seq_printf(m, "CPU%d:\t\tonline\n", i);
}
void smp_bogo(struct seq_file *m)
{
int i;
for_each_online_cpu(i)
seq_printf(m,
"Cpu%dClkTck\t: %016lx\n",
i, cpu_data(i).clock_tick);
}
extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
void smp_callin(void)
{
int cpuid = hard_smp_processor_id();
__local_per_cpu_offset = __per_cpu_offset(cpuid);
if (tlb_type == hypervisor)
sun4v_ktsb_register();
__flush_tlb_all();
setup_sparc64_timer();
if (cheetah_pcache_forced_on)
cheetah_enable_pcache();
callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory");
/* Clear this or we will die instantly when we
* schedule back to this idler...
*/
current_thread_info()->new_child = 0;
/* Attach to the address space of init_task. */
mmgrab(&init_mm);
current->active_mm = &init_mm;
/* inform the notifiers about the new cpu */
notify_cpu_starting(cpuid);
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
rmb();
set_cpu_online(cpuid, true);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
void cpu_panic(void)
{
printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
* the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
*
* The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
*/
#define MASTER 0
#define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
#define NUM_ROUNDS 64 /* magic value */
#define NUM_ITERS 5 /* likewise */
static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
#define DEBUG_TICK_SYNC 0
static inline long get_delta (long *rt, long *master)
{
unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
unsigned long tcenter, t0, t1, tm;
unsigned long i;
for (i = 0; i < NUM_ITERS; i++) {
t0 = tick_ops->get_tick();
go[MASTER] = 1;
membar_safe("#StoreLoad");
while (!(tm = go[SLAVE]))
rmb();
go[SLAVE] = 0;
wmb();
t1 = tick_ops->get_tick();
if (t1 - t0 < best_t1 - best_t0)
best_t0 = t0, best_t1 = t1, best_tm = tm;
}
*rt = best_t1 - best_t0;
*master = best_tm - best_t0;
/* average best_t0 and best_t1 without overflow: */
tcenter = (best_t0/2 + best_t1/2);
if (best_t0 % 2 + best_t1 % 2 == 2)
tcenter++;
return tcenter - best_tm;
}
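/* The averaging above computes the midpoint without overflowing:
 * e.g. best_t0 = 7, best_t1 = 9 gives 3 + 4 = 7, and since both
 * halves dropped a remainder (1 + 1 == 2) tcenter is bumped to 8,
 * matching (7 + 9) / 2 exactly.
 */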
void smp_synchronize_tick_client(void)
{
long i, delta, adj, adjust_latency = 0, done = 0;
unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
struct {
long rt; /* roundtrip time */
long master; /* master's timestamp */
long diff; /* difference between midpoint and master's timestamp */
long lat; /* estimate of itc adjustment latency */
} t[NUM_ROUNDS];
#endif
go[MASTER] = 1;
while (go[MASTER])
rmb();
local_irq_save(flags);
{
for (i = 0; i < NUM_ROUNDS; i++) {
delta = get_delta(&rt, &master_time_stamp);
if (delta == 0)
done = 1; /* let's lock on to this... */
if (!done) {
if (i > 0) {
adjust_latency += -delta;
adj = -delta + adjust_latency/4;
} else
adj = -delta;
tick_ops->add_tick(adj);
}
#if DEBUG_TICK_SYNC
t[i].rt = rt;
t[i].master = master_time_stamp;
t[i].diff = delta;
t[i].lat = adjust_latency/4;
#endif
}
}
local_irq_restore(flags);
#if DEBUG_TICK_SYNC
for (i = 0; i < NUM_ROUNDS; i++)
printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif
printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
"(last diff %ld cycles, maxerr %lu cycles)\n",
smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);
static void smp_synchronize_one_tick(int cpu)
{
unsigned long flags, i;
go[MASTER] = 0;
smp_start_sync_tick_client(cpu);
/* wait for client to be ready */
while (!go[MASTER])
rmb();
/* now let the client proceed into his loop */
go[MASTER] = 0;
membar_safe("#StoreLoad");
raw_spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
rmb();
go[MASTER] = 0;
wmb();
go[SLAVE] = tick_ops->get_tick();
membar_safe("#StoreLoad");
}
}
raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
void **descrp)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
struct hvtramp_descr *hdesc;
unsigned long trampoline_ra;
struct trap_per_cpu *tb;
u64 tte_vaddr, tte_data;
unsigned long hv_err;
int i;
hdesc = kzalloc(sizeof(*hdesc) +
(sizeof(struct hvtramp_mapping) *
num_kernel_image_mappings - 1),
GFP_KERNEL);
if (!hdesc) {
printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
"hvtramp_descr.\n");
return;
}
*descrp = hdesc;
hdesc->cpu = cpu;
hdesc->num_mappings = num_kernel_image_mappings;
tb = &trap_block[cpu];
hdesc->fault_info_va = (unsigned long) &tb->fault_info;
hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
hdesc->thread_reg = thread_reg;
tte_vaddr = (unsigned long) KERNBASE;
tte_data = kern_locked_tte_data;
for (i = 0; i < hdesc->num_mappings; i++) {
hdesc->maps[i].vaddr = tte_vaddr;
hdesc->maps[i].tte = tte_data;
tte_vaddr += 0x400000;
tte_data += 0x400000;
}
trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
hv_err = sun4v_cpu_start(cpu, trampoline_ra,
kimage_addr_to_ra(&sparc64_ttable_tl0),
__pa(hdesc));
if (hv_err)
printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
"gives error %lu\n", hv_err);
}
#endif
extern unsigned long sparc64_cpu_startup;
/* The OBP cpu startup callback truncates the 3rd arg cookie to
* 32-bits (I think) so to be safe we have it read the pointer
* contained here so we work on >4GB machines. -DaveM
*/
static struct thread_info *cpu_new_thread = NULL;
static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
(unsigned long)(&cpu_new_thread);
void *descr = NULL;
int timeout, ret;
callin_flag = 0;
cpu_new_thread = task_thread_info(idle);
if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
if (ldom_domaining_enabled)
ldom_startcpu_cpuid(cpu,
(unsigned long) cpu_new_thread,
&descr);
else
#endif
prom_startcpu_cpuid(cpu, entry, cookie);
} else {
struct device_node *dp = of_find_node_by_cpuid(cpu);
prom_startcpu(dp->phandle, entry, cookie);
}
for (timeout = 0; timeout < 50000; timeout++) {
if (callin_flag)
break;
udelay(100);
}
if (callin_flag) {
ret = 0;
} else {
printk("Processor %d is stuck.\n", cpu);
ret = -ENODEV;
}
cpu_new_thread = NULL;
kfree(descr);
return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
u64 result, target;
int stuck, tmp;
if (this_is_starfire) {
/* map to real upaid */
cpu = (((cpu & 0x3c) << 1) |
((cpu & 0x40) >> 4) |
(cpu & 0x3));
}
target = (cpu << 14) | 0x70;
again:
/* Ok, this is the real Spitfire Errata #54.
* One must read back from a UDB internal register
* after writes to the UDB interrupt dispatch, but
* before the membar Sync for that write.
* So we use the high UDB control register (ASI 0x7f,
* ADDR 0x20) for the dummy read. -DaveM
*/
tmp = 0x40;
__asm__ __volatile__(
"wrpr %1, %2, %%pstate\n\t"
"stxa %4, [%0] %3\n\t"
"stxa %5, [%0+%8] %3\n\t"
"add %0, %8, %0\n\t"
"stxa %6, [%0+%8] %3\n\t"
"membar #Sync\n\t"
"stxa %%g0, [%7] %3\n\t"
"membar #Sync\n\t"
"mov 0x20, %%g1\n\t"
"ldxa [%%g1] 0x7f, %%g0\n\t"
"membar #Sync"
: "=r" (tmp)
: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
"r" (data0), "r" (data1), "r" (data2), "r" (target),
"r" (0x10), "0" (tmp)
: "g1");
/* NOTE: PSTATE_IE is still clear. */
stuck = 100000;
do {
__asm__ __volatile__("ldxa [%%g0] %1, %0"
: "=r" (result)
: "i" (ASI_INTR_DISPATCH_STAT));
if (result == 0) {
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate));
return;
}
stuck -= 1;
if (stuck == 0)
break;
} while (result & 0x1);
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate));
if (stuck == 0) {
printk("CPU[%d]: mondo stuckage result[%016llx]\n",
smp_processor_id(), result);
} else {
udelay(2);
goto again;
}
}
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
u64 *mondo, data0, data1, data2;
u16 *cpu_list;
u64 pstate;
int i;
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
cpu_list = __va(tb->cpu_list_pa);
mondo = __va(tb->cpu_mondo_block_pa);
data0 = mondo[0];
data1 = mondo[1];
data2 = mondo[2];
for (i = 0; i < cnt; i++)
spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}
/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that. However, we do take advantage of
 * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
*/
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
int nack_busy_id, is_jbus, need_more;
u64 *mondo, pstate, ver, busy_mask;
u16 *cpu_list;
cpu_list = __va(tb->cpu_list_pa);
mondo = __va(tb->cpu_mondo_block_pa);
/* Unfortunately, someone at Sun had the brilliant idea to make the
* busy/nack fields hard-coded by ITID number for this Ultra-III
* derivative processor.
*/
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
is_jbus = ((ver >> 32) == __JALAPENO_ID ||
(ver >> 32) == __SERRANO_ID);
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
retry:
need_more = 0;
__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
: : "r" (pstate), "i" (PSTATE_IE));
/* Setup the dispatch data registers. */
__asm__ __volatile__("stxa %0, [%3] %6\n\t"
"stxa %1, [%4] %6\n\t"
"stxa %2, [%5] %6\n\t"
"membar #Sync\n\t"
: /* no outputs */
: "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
"r" (0x40), "r" (0x50), "r" (0x60),
"i" (ASI_INTR_W));
nack_busy_id = 0;
busy_mask = 0;
{
int i;
for (i = 0; i < cnt; i++) {
u64 target, nr;
nr = cpu_list[i];
if (nr == 0xffff)
continue;
target = (nr << 14) | 0x70;
if (is_jbus) {
busy_mask |= (0x1UL << (nr * 2));
} else {
target |= (nack_busy_id << 24);
busy_mask |= (0x1UL <<
(nack_busy_id * 2));
}
__asm__ __volatile__(
"stxa %%g0, [%0] %1\n\t"
"membar #Sync\n\t"
: /* no outputs */
: "r" (target), "i" (ASI_INTR_W));
nack_busy_id++;
if (nack_busy_id == 32) {
need_more = 1;
break;
}
}
}
/* Now, poll for completion. */
{
u64 dispatch_stat, nack_mask;
long stuck;
stuck = 100000 * nack_busy_id;
nack_mask = busy_mask << 1;
do {
__asm__ __volatile__("ldxa [%%g0] %1, %0"
: "=r" (dispatch_stat)
: "i" (ASI_INTR_DISPATCH_STAT));
if (!(dispatch_stat & (busy_mask | nack_mask))) {
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate));
if (unlikely(need_more)) {
int i, this_cnt = 0;
for (i = 0; i < cnt; i++) {
if (cpu_list[i] == 0xffff)
continue;
cpu_list[i] = 0xffff;
this_cnt++;
if (this_cnt == 32)
break;
}
goto retry;
}
return;
}
if (!--stuck)
break;
} while (dispatch_stat & busy_mask);
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate));
if (dispatch_stat & busy_mask) {
/* Busy bits will not clear, continue instead
* of freezing up on this cpu.
*/
printk("CPU[%d]: mondo stuckage result[%016llx]\n",
smp_processor_id(), dispatch_stat);
} else {
int i, this_busy_nack = 0;
/* Delay some random time with interrupts enabled
* to prevent deadlock.
*/
udelay(2 * nack_busy_id);
/* Clear out the mask bits for cpus which did not
* NACK us.
*/
for (i = 0; i < cnt; i++) {
u64 check_mask, nr;
nr = cpu_list[i];
if (nr == 0xffff)
continue;
if (is_jbus)
check_mask = (0x2UL << (2*nr));
else
check_mask = (0x2UL <<
this_busy_nack);
if ((dispatch_stat & check_mask) == 0)
cpu_list[i] = 0xffff;
this_busy_nack += 2;
if (this_busy_nack == 64)
break;
}
goto retry;
}
}
}
#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
#define MONDO_USEC_WAIT_MIN 2
#define MONDO_USEC_WAIT_MAX 100
#define MONDO_RETRY_LIMIT 500000
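/* E.g. a 64-cpu cross call starts with usec_wait = 64 * 2 = 128, which
 * the code below then caps to MONDO_USEC_WAIT_MAX (100).
 */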
/* Multi-cpu list version.
*
 * Deliver xcalls to the 'cnt' cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until all cpus have received it, or until cpus are truly stuck
 * unable to receive mondos and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the
 * hypervisor to perform guest service, such as PCIe error handling.
 * Considering the service time, an overall wait of 1 second is
 * reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined here: 2 usec for
 * a single-cpu quick turnaround and up to 100 usec for a large cpu
 * count.
 * Delivering mondos to a large number of cpus could take longer, so we
 * adjust the retry count as long as target cpus are making forward
 * progress.
*/
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
int this_cpu, tot_cpus, prev_sent, i, rem;
int usec_wait, retries, tot_retries;
u16 first_cpu = 0xffff;
unsigned long xc_rcvd = 0;
unsigned long status;
int ecpuerror_id = 0;
int enocpu_id = 0;
u16 *cpu_list;
u16 cpu;
this_cpu = smp_processor_id();
cpu_list = __va(tb->cpu_list_pa);
usec_wait = cnt * MONDO_USEC_WAIT_MIN;
if (usec_wait > MONDO_USEC_WAIT_MAX)
usec_wait = MONDO_USEC_WAIT_MAX;
retries = tot_retries = 0;
tot_cpus = cnt;
prev_sent = 0;
do {
int n_sent, mondo_delivered, target_cpu_busy;
status = sun4v_cpu_mondo_send(cnt,
tb->cpu_list_pa,
tb->cpu_mondo_block_pa);
/* HV_EOK means all cpus received the xcall, we're done. */
if (likely(status == HV_EOK))
goto xcall_done;
/* If not these non-fatal errors, panic */
if (unlikely((status != HV_EWOULDBLOCK) &&
(status != HV_ECPUERROR) &&
(status != HV_ENOCPU)))
goto fatal_errors;
/* First, see if we made any forward progress.
*
* Go through the cpu_list, count the target cpus that have
* received our mondo (n_sent), and those that did not (rem).
 * Re-pack cpu_list with the cpus that remain to be retried
 * at the front - this simplifies tracking the truly stalled cpus.
*
* The hypervisor indicates successful sends by setting
* cpu list entries to the value 0xffff.
*
* EWOULDBLOCK means some target cpus did not receive the
* mondo and retry usually helps.
*
* ECPUERROR means at least one target cpu is in error state,
* it's usually safe to skip the faulty cpu and retry.
*
 * ENOCPU means one of the target cpus doesn't belong to the
 * domain, perhaps offlined, which is unexpected but not
 * fatal, and it's okay to skip the offlined cpu.
*/
rem = 0;
n_sent = 0;
for (i = 0; i < cnt; i++) {
cpu = cpu_list[i];
if (likely(cpu == 0xffff)) {
n_sent++;
} else if ((status == HV_ECPUERROR) &&
(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
ecpuerror_id = cpu + 1;
} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
enocpu_id = cpu + 1;
} else {
cpu_list[rem++] = cpu;
}
}
/* No cpu remained, we're done. */
if (rem == 0)
break;
/* Otherwise, update the cpu count for retry. */
cnt = rem;
/* Record the overall number of mondos received by the
* first of the remaining cpus.
*/
if (first_cpu != cpu_list[0]) {
first_cpu = cpu_list[0];
xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
}
/* Was any mondo delivered successfully? */
mondo_delivered = (n_sent > prev_sent);
prev_sent = n_sent;
/* or, was any target cpu busy processing other mondos? */
target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
/* Retry count is for no progress. If we're making progress,
* reset the retry count.
*/
if (likely(mondo_delivered || target_cpu_busy)) {
tot_retries += retries;
retries = 0;
} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
goto fatal_mondo_timeout;
}
/* Delay a little bit to let other cpus catch up on
* their cpu mondo queue work.
*/
if (!mondo_delivered)
udelay(usec_wait);
retries++;
} while (1);
xcall_done:
if (unlikely(ecpuerror_id > 0)) {
pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
this_cpu, ecpuerror_id - 1);
} else if (unlikely(enocpu_id > 0)) {
pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
this_cpu, enocpu_id - 1);
}
return;
fatal_errors:
/* fatal errors include bad alignment, etc */
pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
panic("Unexpected SUN4V mondo error %lu\n", status);
fatal_mondo_timeout:
/* some cpus being non-responsive to the cpu mondo */
pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
struct trap_per_cpu *tb;
int this_cpu, i, cnt;
unsigned long flags;
u16 *cpu_list;
u64 *mondo;
/* We have to do this whole thing with interrupts fully disabled.
* Otherwise if we send an xcall from interrupt context it will
* corrupt both our mondo block and cpu list state.
*
* One consequence of this is that we cannot use timeout mechanisms
* that depend upon interrupts being delivered locally. So, for
* example, we cannot sample jiffies and expect it to advance.
*
* Fortunately, udelay() uses %stick/%tick so we can use that.
*/
local_irq_save(flags);
this_cpu = smp_processor_id();
tb = &trap_block[this_cpu];
mondo = __va(tb->cpu_mondo_block_pa);
mondo[0] = data0;
mondo[1] = data1;
mondo[2] = data2;
wmb();
cpu_list = __va(tb->cpu_list_pa);
/* Setup the initial cpu list. */
cnt = 0;
for_each_cpu(i, mask) {
if (i == this_cpu || !cpu_online(i))
continue;
cpu_list[cnt++] = i;
}
if (cnt)
xcall_deliver_impl(tb, cnt);
local_irq_restore(flags);
}
/* Send cross call to all processors mentioned in MASK_P
* except self. Really, there are only two cases currently,
* "cpu_online_mask" and "mm_cpumask(mm)".
*/
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
xcall_deliver(data0, data1, data2, mask);
}
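/* The packing above splits data0 in half: e.g. ctx 0x5 with a handler
 * whose low 32 address bits are 0x004ac000 produces
 * data0 = 0x00000005004ac000 - context on top, text address below.
 * (Values are illustrative only.)
 */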
/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}
extern unsigned long xcall_sync_tick;
static void smp_start_sync_tick_client(int cpu)
{
xcall_deliver((u64) &xcall_sync_tick, 0, 0,
cpumask_of(cpu));
}
extern unsigned long xcall_call_function;
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}
extern unsigned long xcall_call_function_single;
void arch_send_call_function_single_ipi(int cpu)
{
xcall_deliver((u64) &xcall_call_function_single, 0, 0,
cpumask_of(cpu));
}
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
irq_enter();
generic_smp_call_function_single_interrupt();
irq_exit();
}
static void tsb_sync(void *info)
{
struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
struct mm_struct *mm = info;
/* It is not valid to test "current->active_mm == mm" here.
*
* The value of "current" is not changed atomically with
* switch_mm(). But that's OK, we just need to check the
* current cpu's trap block PGD physical address.
*/
if (tp->pgd_paddr == __pa(mm->pgd))
tsb_context_switch(mm);
}
void smp_tsb_sync(struct mm_struct *mm)
{
smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif
#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;
static inline void __local_flush_dcache_folio(struct folio *folio)
{
unsigned int i, nr = folio_nr_pages(folio);
#ifdef DCACHE_ALIASING_POSSIBLE
for (i = 0; i < nr; i++)
__flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
((tlb_type == spitfire) &&
folio_flush_mapping(folio) != NULL));
#else
if (folio_flush_mapping(folio) != NULL &&
tlb_type == spitfire) {
unsigned long pfn = folio_pfn(folio);
for (i = 0; i < nr; i++)
__flush_icache_page((pfn + i) * PAGE_SIZE);
}
#endif
}
void smp_flush_dcache_folio_impl(struct folio *folio, int cpu)
{
int this_cpu;
if (tlb_type == hypervisor)
return;
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
#endif
this_cpu = get_cpu();
if (cpu == this_cpu) {
__local_flush_dcache_folio(folio);
} else if (cpu_online(cpu)) {
void *pg_addr = folio_address(folio);
u64 data0 = 0;
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (folio_flush_mapping(folio) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
}
if (data0) {
unsigned int i, nr = folio_nr_pages(folio);
for (i = 0; i < nr; i++) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
#endif
pg_addr += PAGE_SIZE;
}
}
}
put_cpu();
}
void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio)
{
void *pg_addr;
u64 data0;
if (tlb_type == hypervisor)
return;
preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
#endif
data0 = 0;
pg_addr = folio_address(folio);
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (folio_flush_mapping(folio) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
}
if (data0) {
unsigned int i, nr = folio_nr_pages(folio);
for (i = 0; i < nr; i++) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
#endif
pg_addr += PAGE_SIZE;
}
}
__local_flush_dcache_folio(folio);
preempt_enable();
}
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif
void smp_fetch_global_regs(void)
{
smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}
void smp_fetch_global_pmu(void)
{
if (tlb_type == hypervisor &&
sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
else
smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
* to the stack before we get here because all callers of us
* are flush_tlb_*() routines, and these run after flush_cache_*()
* which performs the flushw.
*
* mm->cpu_vm_mask is a bit mask of which cpus an address
* space has (potentially) executed on, this is the heuristic
* we use to limit cross calls.
*/
/* This currently is only used by the hugetlb arch pre-fault
* hook on UltraSPARC-III+ and later when changing the pagesize
* bits of the context register for an address space.
*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
u32 ctx = CTX_HWBITS(mm->context);
get_cpu();
smp_cross_call_masked(&xcall_flush_tlb_mm,
ctx, 0, 0,
mm_cpumask(mm));
__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
put_cpu();
}
struct tlb_pending_info {
unsigned long ctx;
unsigned long nr;
unsigned long *vaddrs;
};
static void tlb_pending_func(void *info)
{
struct tlb_pending_info *t = info;
__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
u32 ctx = CTX_HWBITS(mm->context);
struct tlb_pending_info info;
get_cpu();
info.ctx = ctx;
info.nr = nr;
info.vaddrs = vaddrs;
smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
&info, 1);
__flush_tlb_pending(ctx, nr, vaddrs);
put_cpu();
}
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long context = CTX_HWBITS(mm->context);
get_cpu();
smp_cross_call_masked(&xcall_flush_tlb_page,
context, vaddr, 0,
mm_cpumask(mm));
__flush_tlb_page(context, vaddr);
put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
start &= PAGE_MASK;
end = PAGE_ALIGN(end);
if (start != end) {
smp_cross_call(&xcall_flush_tlb_kernel_range,
0, start, end);
__flush_tlb_kernel_range(start, end);
}
}
/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;
static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
int result = atomic_add_return(1, &smp_capture_depth);
if (result == 1) {
int ncpus = num_online_cpus();
#ifdef CAPTURE_DEBUG
printk("CPU[%d]: Sending penguins to jail...",
smp_processor_id());
#endif
penguins_are_doing_time = 1;
atomic_inc(&smp_capture_registry);
smp_cross_call(&xcall_capture, 0, 0, 0);
while (atomic_read(&smp_capture_registry) != ncpus)
rmb();
#ifdef CAPTURE_DEBUG
printk("done\n");
#endif
}
}
void smp_release(void)
{
if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
printk("CPU[%d]: Giving pardon to "
"imprisoned penguins\n",
smp_processor_id());
#endif
penguins_are_doing_time = 0;
membar_safe("#StoreLoad");
atomic_dec(&smp_capture_registry);
}
}
/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
* set, so they can service tlb flush xcalls...
*/
extern void prom_world(int);
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
preempt_disable();
__asm__ __volatile__("flushw");
prom_world(1);
atomic_inc(&smp_capture_registry);
membar_safe("#StoreLoad");
while (penguins_are_doing_time)
rmb();
atomic_dec(&smp_capture_registry);
prom_world(0);
preempt_enable();
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
void smp_prepare_boot_cpu(void)
{
}
void __init smp_setup_processor_id(void)
{
if (tlb_type == spitfire)
xcall_deliver_impl = spitfire_xcall_deliver;
else if (tlb_type == cheetah || tlb_type == cheetah_plus)
xcall_deliver_impl = cheetah_xcall_deliver;
else
xcall_deliver_impl = hypervisor_xcall_deliver;
}
void __init smp_fill_in_cpu_possible_map(void)
{
int possible_cpus = num_possible_cpus();
int i;
if (possible_cpus > nr_cpu_ids)
possible_cpus = nr_cpu_ids;
for (i = 0; i < possible_cpus; i++)
set_cpu_possible(i, true);
for (; i < NR_CPUS; i++)
set_cpu_possible(i, false);
}
void smp_fill_in_sib_core_maps(void)
{
unsigned int i;
for_each_present_cpu(i) {
unsigned int j;
cpumask_clear(&cpu_core_map[i]);
if (cpu_data(i).core_id == 0) {
cpumask_set_cpu(i, &cpu_core_map[i]);
continue;
}
for_each_present_cpu(j) {
if (cpu_data(i).core_id ==
cpu_data(j).core_id)
cpumask_set_cpu(j, &cpu_core_map[i]);
}
}
for_each_present_cpu(i) {
unsigned int j;
for_each_present_cpu(j) {
if (cpu_data(i).max_cache_id ==
cpu_data(j).max_cache_id)
cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
if (cpu_data(i).sock_id == cpu_data(j).sock_id)
cpumask_set_cpu(j, &cpu_core_sib_map[i]);
}
}
for_each_present_cpu(i) {
unsigned int j;
cpumask_clear(&per_cpu(cpu_sibling_map, i));
if (cpu_data(i).proc_id == -1) {
cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
continue;
}
for_each_present_cpu(j) {
if (cpu_data(i).proc_id ==
cpu_data(j).proc_id)
cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
}
}
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = smp_boot_one_cpu(cpu, tidle);
if (!ret) {
cpumask_set_cpu(cpu, &smp_commenced_mask);
while (!cpu_online(cpu))
mb();
if (!cpu_online(cpu)) {
ret = -ENODEV;
} else {
/* On SUN4V, writes to %tick and %stick are
* not allowed.
*/
if (tlb_type != hypervisor)
smp_synchronize_one_tick(cpu);
}
}
return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
int cpu = smp_processor_id();
unsigned long pstate;
idle_task_exit();
if (tlb_type == hypervisor) {
struct trap_per_cpu *tb = &trap_block[cpu];
sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
tb->cpu_mondo_pa, 0);
sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
tb->dev_mondo_pa, 0);
sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
tb->resum_mondo_pa, 0);
sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
tb->nonresum_mondo_pa, 0);
}
cpumask_clear_cpu(cpu, &smp_commenced_mask);
membar_safe("#Sync");
local_irq_disable();
__asm__ __volatile__(
"rdpr %%pstate, %0\n\t"
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
while (1)
barrier();
}
int __cpu_disable(void)
{
int cpu = smp_processor_id();
cpuinfo_sparc *c;
int i;
for_each_cpu(i, &cpu_core_map[cpu])
cpumask_clear_cpu(cpu, &cpu_core_map[i]);
cpumask_clear(&cpu_core_map[cpu]);
for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
c = &cpu_data(cpu);
c->core_id = 0;
c->proc_id = -1;
smp_wmb();
/* Make sure no interrupts point to this cpu. */
fixup_irqs();
local_irq_enable();
mdelay(1);
local_irq_disable();
set_cpu_online(cpu, false);
cpu_map_rebuild();
return 0;
}
void __cpu_die(unsigned int cpu)
{
int i;
for (i = 0; i < 100; i++) {
smp_rmb();
if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
break;
msleep(100);
}
if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
} else {
#if defined(CONFIG_SUN_LDOMS)
unsigned long hv_err;
int limit = 100;
do {
hv_err = sun4v_cpu_stop(cpu);
if (hv_err == HV_EOK) {
set_cpu_present(cpu, false);
break;
}
} while (--limit > 0);
if (limit <= 0) {
printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
hv_err);
}
#endif
}
}
#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
}
static void send_cpu_ipi(int cpu)
{
xcall_deliver((u64) &xcall_receive_signal,
0, 0, cpumask_of(cpu));
}
void scheduler_poke(void)
{
if (!cpu_poke)
return;
if (!__this_cpu_read(poke))
return;
__this_cpu_write(poke, false);
set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
}
static unsigned long send_cpu_poke(int cpu)
{
unsigned long hv_err;
per_cpu(poke, cpu) = true;
hv_err = sun4v_cpu_poke(cpu);
if (hv_err != HV_EOK) {
per_cpu(poke, cpu) = false;
pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
__func__, hv_err);
}
return hv_err;
}
void arch_smp_send_reschedule(int cpu)
{
if (cpu == smp_processor_id()) {
WARN_ON_ONCE(preemptible());
set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
return;
}
/* Use cpu poke to resume idle cpu if supported. */
if (cpu_poke && idle_cpu(cpu)) {
unsigned long ret;
ret = send_cpu_poke(cpu);
if (ret == HV_EOK)
return;
}
/* Use IPI in following cases:
* - cpu poke not supported
* - cpu not idle
* - send_cpu_poke() returns with error
*/
send_cpu_ipi(cpu);
}
void smp_init_cpu_poke(void)
{
unsigned long major;
unsigned long minor;
int ret;
if (tlb_type != hypervisor)
return;
ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
if (ret) {
pr_debug("HV_GRP_CORE is not registered\n");
return;
}
if (major == 1 && minor >= 6) {
/* CPU POKE is registered. */
cpu_poke = true;
return;
}
pr_debug("CPU_POKE not supported\n");
}
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
scheduler_ipi();
}
static void stop_this_cpu(void *dummy)
{
set_cpu_online(smp_processor_id(), false);
prom_stopself();
}
void smp_send_stop(void)
{
int cpu;
if (tlb_type == hypervisor) {
int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
sunhv_migrate_hvcons_irq(this_cpu);
#endif
for_each_online_cpu(cpu) {
if (cpu == this_cpu)
continue;
set_cpu_online(cpu, false);
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled) {
unsigned long hv_err;
hv_err = sun4v_cpu_stop(cpu);
if (hv_err)
printk(KERN_ERR "sun4v_cpu_stop() "
"failed err=%lu\n", hv_err);
} else
#endif
prom_stopcpu_cpuid(cpu);
}
} else
smp_call_function(stop_this_cpu, NULL, 0);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
if (cpu_to_node(from) == cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
}
static int __init pcpu_cpu_to_node(int cpu)
{
return cpu_to_node(cpu);
}
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
unsigned int cpu;
int rc = -EINVAL;
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, 4 << 20,
pcpu_cpu_distance,
pcpu_cpu_to_node);
if (rc)
pr_warn("PERCPU: %s allocator failed (%d), "
"falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)
__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
/* Setup %g5 for the boot cpu. */
__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
of_fill_in_cpu_data();
if (tlb_type == hypervisor)
mdesc_fill_in_cpu_data(cpu_all_mask);
}
| linux-master | arch/sparc/kernel/smp_64.c |
// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1995 David S. Miller ([email protected])
* Copyright (C) 1996 Miguel de Icaza ([email protected])
* Copyright (C) 1997 Eddie C. Dost ([email protected])
* Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/psrcompat.h>
#include <asm/fpumacro.h>
#include <asm/visasm.h>
#include <asm/compat_signal.h>
#include <asm/switch_to.h>
#include "sigutil.h"
#include "kernel.h"
/* This magic should be in g_upper[0] for all upper parts
* to be valid.
*/
#define SIGINFO_EXTRA_V8PLUS_MAGIC 0x130e269
typedef struct {
unsigned int g_upper[8];
unsigned int o_upper[8];
unsigned int asi;
} siginfo_extra_v8plus_t;
struct signal_frame32 {
struct sparc_stackf32 ss;
__siginfo32_t info;
/* __siginfo_fpu_t * */ u32 fpu_save;
unsigned int insns[2];
unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
siginfo_extra_v8plus_t v8plus;
/* __siginfo_rwin_t * */u32 rwin_save;
} __attribute__((aligned(8)));
struct rt_signal_frame32 {
struct sparc_stackf32 ss;
compat_siginfo_t info;
struct pt_regs32 regs;
compat_sigset_t mask;
/* __siginfo_fpu_t * */ u32 fpu_save;
unsigned int insns[2];
compat_stack_t stack;
unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
siginfo_extra_v8plus_t v8plus;
/* __siginfo_rwin_t * */u32 rwin_save;
} __attribute__((aligned(8)));
/* Checks if the fp is valid. We always build signal frames which are
* 16-byte aligned, therefore we can always enforce that the restore
* frame has that property as well.
*/
static bool invalid_frame_pointer(void __user *fp, int fplen)
{
if ((((unsigned long) fp) & 15) ||
((unsigned long)fp) > 0x100000000ULL - fplen)
return true;
return false;
}
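/* For example, fp = 0xfffff010 with fplen = 0x1000 is rejected by the
 * second test: the frame would run past the 4GB limit of the compat
 * address space (0x100000000 - 0x1000 = 0xfffff000 < fp).
 */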
void do_sigreturn32(struct pt_regs *regs)
{
struct signal_frame32 __user *sf;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
unsigned int psr, ufp;
unsigned int pc, npc;
sigset_t set;
compat_sigset_t seta;
int err, i;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
goto segv;
if (ufp & 0x7)
goto segv;
if (__get_user(pc, &sf->info.si_regs.pc) ||
__get_user(npc, &sf->info.si_regs.npc))
goto segv;
if ((pc | npc) & 3)
goto segv;
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
npc &= 0xffffffff;
}
regs->tpc = pc;
regs->tnpc = npc;
/* 2. Restore the state */
err = __get_user(regs->y, &sf->info.si_regs.y);
err |= __get_user(psr, &sf->info.si_regs.psr);
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
err |= __get_user(i, &sf->v8plus.g_upper[0]);
if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
unsigned long asi;
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
err |= __get_user(asi, &sf->v8plus.asi);
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= ((asi & 0xffUL) << 24UL);
}
}
/* User can only change condition codes in %tstate. */
regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
regs->tstate |= psr_to_tstate_icc(psr);
/* Prevent syscall restart. */
pt_regs_clear_syscall(regs);
err |= __get_user(fpu_save, &sf->fpu_save);
if (!err && fpu_save)
err |= restore_fpu_state(regs, compat_ptr(fpu_save));
err |= __get_user(rwin_save, &sf->rwin_save);
if (!err && rwin_save) {
if (restore_rwin_state(compat_ptr(rwin_save)))
goto segv;
}
err |= __get_user(seta.sig[0], &sf->info.si_mask);
err |= copy_from_user(&seta.sig[1], &sf->extramask,
(_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
if (err)
goto segv;
set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV);
}
asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
{
struct rt_signal_frame32 __user *sf;
unsigned int psr, pc, npc, ufp;
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
sigset_t set;
int err, i;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
goto segv;
if (ufp & 0x7)
goto segv;
if (__get_user(pc, &sf->regs.pc) ||
__get_user(npc, &sf->regs.npc))
goto segv;
if ((pc | npc) & 3)
goto segv;
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
npc &= 0xffffffff;
}
regs->tpc = pc;
regs->tnpc = npc;
/* 2. Restore the state */
err = __get_user(regs->y, &sf->regs.y);
err |= __get_user(psr, &sf->regs.psr);
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]);
if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
err |= __get_user(i, &sf->v8plus.g_upper[0]);
if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
unsigned long asi;
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
err |= __get_user(asi, &sf->v8plus.asi);
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= ((asi & 0xffUL) << 24UL);
}
}
/* User can only change condition codes in %tstate. */
regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
regs->tstate |= psr_to_tstate_icc(psr);
/* Prevent syscall restart. */
pt_regs_clear_syscall(regs);
err |= __get_user(fpu_save, &sf->fpu_save);
if (!err && fpu_save)
err |= restore_fpu_state(regs, compat_ptr(fpu_save));
err |= get_compat_sigset(&set, &sf->mask);
err |= compat_restore_altstack(&sf->stack);
if (err)
goto segv;
err |= __get_user(rwin_save, &sf->rwin_save);
if (!err && rwin_save) {
if (restore_rwin_state(compat_ptr(rwin_save)))
goto segv;
}
set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV);
}
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp;
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sp = regs->u_regs[UREG_FP];
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
return (void __user *) -1L;
/* This is the X/Open sanctioned signal stack switching. */
sp = sigsp(sp, ksig) - framesize;
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~15UL;
return (void __user *) sp;
}
/* The I-cache flush instruction only works in the primary ASI, which
* right now is the nucleus, aka. kernel space.
*
* Therefore we have to kick the instructions out using the kernel
* side linear mapping of the physical address backing the user
* instructions.
*/
static void flush_signal_insns(unsigned long address)
{
unsigned long pstate, paddr;
pte_t *ptep, pte;
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
/* Commit all stores of the instructions we are about to flush. */
wmb();
/* Disable cross-call reception. In this way even a very wide
* munmap() on another cpu can't tear down the page table
* hierarchy from underneath us, since that can't complete
* until the IPI tlb flush returns.
*/
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
__asm__ __volatile__("wrpr %0, %1, %%pstate"
: : "r" (pstate), "i" (PSTATE_IE));
pgdp = pgd_offset(current->mm, address);
if (pgd_none(*pgdp))
goto out_irqs_on;
p4dp = p4d_offset(pgdp, address);
if (p4d_none(*p4dp))
goto out_irqs_on;
pudp = pud_offset(p4dp, address);
if (pud_none(*pudp))
goto out_irqs_on;
pmdp = pmd_offset(pudp, address);
if (pmd_none(*pmdp))
goto out_irqs_on;
ptep = pte_offset_map(pmdp, address);
if (!ptep)
goto out_irqs_on;
pte = *ptep;
if (!pte_present(pte))
goto out_unmap;
paddr = (unsigned long) page_address(pte_page(pte));
__asm__ __volatile__("flush %0 + %1"
: /* no outputs */
: "r" (paddr),
"r" (address & (PAGE_SIZE - 1))
: "memory");
out_unmap:
pte_unmap(ptep);
out_irqs_on:
__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
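/*
 * Illustrative note (not part of the original file): the rdpr/wrpr
 * pair in flush_signal_insns() is an open-coded interrupt save and
 * restore; wrpr XORs PSTATE_IE out of %pstate on entry and rewrites
 * the saved value on exit.  A sketch of the generic equivalent:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		blocks cross-call IPIs
 *	... walk the page tables and flush ...
 *	local_irq_restore(flags);
 */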
static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
sigset_t *oldset)
{
struct signal_frame32 __user *sf;
int i, err, wsaved;
void __user *tail;
int sigframe_size;
u32 psr;
compat_sigset_t seta;
/* 1. Make sure everything is clean */
synchronize_user_stack();
save_and_clear_fpu();
wsaved = get_thread_wsaved();
sigframe_size = sizeof(*sf);
if (current_thread_info()->fpsaved[0] & FPRS_FEF)
sigframe_size += sizeof(__siginfo_fpu_t);
if (wsaved)
sigframe_size += sizeof(__siginfo_rwin_t);
sf = (struct signal_frame32 __user *)
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
if (show_unhandled_signals)
pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n",
current->comm, current->pid, (unsigned long)sf,
regs->tpc, regs->u_regs[UREG_I7]);
force_sigsegv(ksig->sig);
return -EINVAL;
}
tail = (sf + 1);
/* 2. Save the current process state */
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
err = put_user(regs->tpc, &sf->info.si_regs.pc);
err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
err |= __put_user(regs->y, &sf->info.si_regs.y);
psr = tstate_to_psr(regs->tstate);
if (current_thread_info()->fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &sf->info.si_regs.psr);
for (i = 0; i < 16; i++)
err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
for (i = 1; i < 16; i++)
err |= __put_user(((u32 *)regs->u_regs)[2*i],
&sf->v8plus.g_upper[i]);
err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
&sf->v8plus.asi);
if (psr & PSR_EF) {
__siginfo_fpu_t __user *fp = tail;
tail += sizeof(*fp);
err |= save_fpu_state(regs, fp);
err |= __put_user((u64)fp, &sf->fpu_save);
} else {
err |= __put_user(0, &sf->fpu_save);
}
if (wsaved) {
__siginfo_rwin_t __user *rwp = tail;
tail += sizeof(*rwp);
err |= save_rwin_state(wsaved, rwp);
err |= __put_user((u64)rwp, &sf->rwin_save);
set_thread_wsaved(0);
} else {
err |= __put_user(0, &sf->rwin_save);
}
	/* If these change we need to know - assignments to seta rely on these sizes */
BUILD_BUG_ON(_NSIG_WORDS != 1);
BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2);
seta.sig[1] = (oldset->sig[0] >> 32);
seta.sig[0] = oldset->sig[0];
err |= __put_user(seta.sig[0], &sf->info.si_mask);
err |= __copy_to_user(sf->extramask, &seta.sig[1],
(_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
if (!wsaved) {
err |= raw_copy_in_user((u32 __user *)sf,
(u32 __user *)(regs->u_regs[UREG_FP]),
sizeof(struct reg_window32));
} else {
struct reg_window *rp;
		rp = &current_thread_info()->reg_window[wsaved - 1];
for (i = 0; i < 8; i++)
err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
for (i = 0; i < 6; i++)
err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
err |= __put_user(rp->ins[6], &sf->ss.fp);
err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
}
if (err)
return err;
/* 3. signal handler back-trampoline and parameters */
regs->u_regs[UREG_FP] = (unsigned long) sf;
regs->u_regs[UREG_I0] = ksig->sig;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
/* 4. signal handler */
regs->tpc = (unsigned long) ksig->ka.sa.sa_handler;
regs->tnpc = (regs->tpc + 4);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
/* 5. return to kernel instructions */
if (ksig->ka.ka_restorer) {
regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
} else {
unsigned long address = ((unsigned long)&(sf->insns[0]));
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
err = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
if (err)
return err;
flush_signal_insns(address);
}
return 0;
}
static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
sigset_t *oldset)
{
struct rt_signal_frame32 __user *sf;
int i, err, wsaved;
void __user *tail;
int sigframe_size;
u32 psr;
/* 1. Make sure everything is clean */
synchronize_user_stack();
save_and_clear_fpu();
wsaved = get_thread_wsaved();
sigframe_size = sizeof(*sf);
if (current_thread_info()->fpsaved[0] & FPRS_FEF)
sigframe_size += sizeof(__siginfo_fpu_t);
if (wsaved)
sigframe_size += sizeof(__siginfo_rwin_t);
sf = (struct rt_signal_frame32 __user *)
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
if (show_unhandled_signals)
pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n",
current->comm, current->pid, (unsigned long)sf,
regs->tpc, regs->u_regs[UREG_I7]);
force_sigsegv(ksig->sig);
return -EINVAL;
}
tail = (sf + 1);
/* 2. Save the current process state */
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
err = put_user(regs->tpc, &sf->regs.pc);
err |= __put_user(regs->tnpc, &sf->regs.npc);
err |= __put_user(regs->y, &sf->regs.y);
psr = tstate_to_psr(regs->tstate);
if (current_thread_info()->fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &sf->regs.psr);
for (i = 0; i < 16; i++)
err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
for (i = 1; i < 16; i++)
err |= __put_user(((u32 *)regs->u_regs)[2*i],
&sf->v8plus.g_upper[i]);
err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
&sf->v8plus.asi);
if (psr & PSR_EF) {
__siginfo_fpu_t __user *fp = tail;
tail += sizeof(*fp);
err |= save_fpu_state(regs, fp);
err |= __put_user((u64)fp, &sf->fpu_save);
} else {
err |= __put_user(0, &sf->fpu_save);
}
if (wsaved) {
__siginfo_rwin_t __user *rwp = tail;
tail += sizeof(*rwp);
err |= save_rwin_state(wsaved, rwp);
err |= __put_user((u64)rwp, &sf->rwin_save);
set_thread_wsaved(0);
} else {
err |= __put_user(0, &sf->rwin_save);
}
/* Update the siginfo structure. */
err |= copy_siginfo_to_user32(&sf->info, &ksig->info);
/* Setup sigaltstack */
err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
err |= put_compat_sigset(&sf->mask, oldset, sizeof(compat_sigset_t));
if (!wsaved) {
err |= raw_copy_in_user((u32 __user *)sf,
(u32 __user *)(regs->u_regs[UREG_FP]),
sizeof(struct reg_window32));
} else {
struct reg_window *rp;
		rp = &current_thread_info()->reg_window[wsaved - 1];
for (i = 0; i < 8; i++)
err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
for (i = 0; i < 6; i++)
err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
err |= __put_user(rp->ins[6], &sf->ss.fp);
err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
}
if (err)
return err;
/* 3. signal handler back-trampoline and parameters */
regs->u_regs[UREG_FP] = (unsigned long) sf;
regs->u_regs[UREG_I0] = ksig->sig;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;
/* 4. signal handler */
regs->tpc = (unsigned long) ksig->ka.sa.sa_handler;
regs->tnpc = (regs->tpc + 4);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
/* 5. return to kernel instructions */
if (ksig->ka.ka_restorer)
regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
else {
unsigned long address = ((unsigned long)&(sf->insns[0]));
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
/* mov __NR_rt_sigreturn, %g1 */
err |= __put_user(0x82102065, &sf->insns[0]);
/* t 0x10 */
err |= __put_user(0x91d02010, &sf->insns[1]);
if (err)
return err;
flush_signal_insns(address);
}
return 0;
}
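/*
 * Illustrative decode (not part of the original file) of the
 * trampolines written by setup_frame32() and setup_rt_frame32().
 * Each is two SPARC instructions: load the syscall number into %g1,
 * then trap into the kernel:
 *
 *	0x821020d8	mov 0xd8, %g1	0xd8 == 216 == __NR_sigreturn
 *	0x82102065	mov 0x65, %g1	0x65 == 101 == __NR_rt_sigreturn
 *	0x91d02010	t 0x10		software trap, syscall entry
 */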
static inline void handle_signal32(struct ksignal *ksig,
struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int err;
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
err = setup_rt_frame32(ksig, regs, oldset);
else
err = setup_frame32(ksig, regs, oldset);
signal_setup_done(err, ksig, 0);
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
struct sigaction *sa)
{
switch (regs->u_regs[UREG_I0]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
no_system_call_restart:
regs->u_regs[UREG_I0] = EINTR;
regs->tstate |= TSTATE_ICARRY;
break;
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
}
}
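/*
 * Illustrative note (not part of the original file): by the time
 * syscall_restart32() runs, the syscall return path has already
 * advanced %tpc/%tnpc past the trapping instruction.  Since every
 * SPARC instruction is 4 bytes, "tpc -= 4; tnpc -= 4" points
 * execution back at the original trap, so returning to userspace
 * re-issues the system call:
 *
 *	0x1000:	t 0x10		tpc == 0x1004 after the syscall
 *	0x1004:	...		tpc -= 4  ->  replay the "t 0x10"
 */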
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init with a SIGKILL, even by
* mistake.
*/
void do_signal32(struct pt_regs * regs)
{
struct ksignal ksig;
unsigned long orig_i0 = 0;
int restart_syscall = 0;
bool has_handler = get_signal(&ksig);
if (pt_regs_is_syscall(regs) &&
(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
restart_syscall = 1;
orig_i0 = regs->u_regs[UREG_G6];
}
if (has_handler) {
if (restart_syscall)
syscall_restart32(orig_i0, regs, &ksig.ka.sa);
handle_signal32(&ksig, regs);
} else {
if (restart_syscall) {
switch (regs->u_regs[UREG_I0]) {
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
/* replay the system call when we are done */
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
}
}
restore_saved_sigmask();
}
}
struct sigstack32 {
u32 the_stack;
int cur_status;
};
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
{
struct sigstack32 __user *ssptr =
(struct sigstack32 __user *)((unsigned long)(u_ssptr));
struct sigstack32 __user *ossptr =
(struct sigstack32 __user *)((unsigned long)(u_ossptr));
int ret = -EFAULT;
/* First see if old state is wanted. */
if (ossptr) {
if (put_user(current->sas_ss_sp + current->sas_ss_size,
&ossptr->the_stack) ||
__put_user(on_sig_stack(sp), &ossptr->cur_status))
goto out;
}
/* Now see if we want to update the new state. */
if (ssptr) {
u32 ss_sp;
if (get_user(ss_sp, &ssptr->the_stack))
goto out;
/* If the current stack was set with sigaltstack, don't
* swap stacks while we are on it.
*/
ret = -EPERM;
if (current->sas_ss_sp && on_sig_stack(sp))
goto out;
/* Since we don't know the extent of the stack, and we don't
* track onstack-ness, but rather calculate it, we must
* presume a size. Ho hum this interface is lossy.
*/
current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
current->sas_ss_size = SIGSTKSZ;
}
ret = 0;
out:
return ret;
}
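/*
 * Worked example (not part of the original file) of the lossy size
 * presumption above: the old SunOS sigstack() interface passes only
 * the top of the stack, so for ss_sp == 0xf0000000 the kernel records
 *
 *	current->sas_ss_sp   = 0xf0000000 - SIGSTKSZ;
 *	current->sas_ss_size = SIGSTKSZ;
 *
 * i.e. it assumes the alternate stack is exactly SIGSTKSZ bytes deep,
 * whatever the caller actually allocated.
 */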
/*
* Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
* changes likely come with new fields that should be added below.
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(compat_siginfo_t) == 128);
static_assert(__alignof__(compat_siginfo_t) == 4);
static_assert(offsetof(compat_siginfo_t, si_signo) == 0x00);
static_assert(offsetof(compat_siginfo_t, si_errno) == 0x04);
static_assert(offsetof(compat_siginfo_t, si_code) == 0x08);
static_assert(offsetof(compat_siginfo_t, si_pid) == 0x0c);
static_assert(offsetof(compat_siginfo_t, si_uid) == 0x10);
static_assert(offsetof(compat_siginfo_t, si_tid) == 0x0c);
static_assert(offsetof(compat_siginfo_t, si_overrun) == 0x10);
static_assert(offsetof(compat_siginfo_t, si_status) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_utime) == 0x18);
static_assert(offsetof(compat_siginfo_t, si_stime) == 0x1c);
static_assert(offsetof(compat_siginfo_t, si_value) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_int) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_ptr) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_addr) == 0x0c);
static_assert(offsetof(compat_siginfo_t, si_trapno) == 0x10);
static_assert(offsetof(compat_siginfo_t, si_addr_lsb) == 0x10);
static_assert(offsetof(compat_siginfo_t, si_lower) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_upper) == 0x18);
static_assert(offsetof(compat_siginfo_t, si_pkey) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_perf_data) == 0x10);
static_assert(offsetof(compat_siginfo_t, si_perf_type) == 0x14);
static_assert(offsetof(compat_siginfo_t, si_perf_flags) == 0x18);
static_assert(offsetof(compat_siginfo_t, si_band) == 0x0c);
static_assert(offsetof(compat_siginfo_t, si_fd) == 0x10);
| linux-master | arch/sparc/kernel/signal32.c |
// SPDX-License-Identifier: GPL-2.0
/* cpumap.c: used for optimizing CPU assignment
*
* Copyright (C) 2009 Hong H. Pham <[email protected]>
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <asm/cpudata.h>
#include "cpumap.h"
enum {
CPUINFO_LVL_ROOT = 0,
CPUINFO_LVL_NODE,
CPUINFO_LVL_CORE,
CPUINFO_LVL_PROC,
CPUINFO_LVL_MAX,
};
enum {
ROVER_NO_OP = 0,
/* Increment rover every time level is visited */
ROVER_INC_ON_VISIT = 1 << 0,
/* Increment parent's rover every time rover wraps around */
ROVER_INC_PARENT_ON_LOOP = 1 << 1,
};
struct cpuinfo_node {
int id;
int level;
int num_cpus; /* Number of CPUs in this hierarchy */
int parent_index;
int child_start; /* Array index of the first child node */
int child_end; /* Array index of the last child node */
int rover; /* Child node iterator */
};
struct cpuinfo_level {
int start_index; /* Index of first node of a level in a cpuinfo tree */
int end_index; /* Index of last node of a level in a cpuinfo tree */
int num_nodes; /* Number of nodes in a level in a cpuinfo tree */
};
struct cpuinfo_tree {
int total_nodes;
/* Offsets into nodes[] for each level of the tree */
struct cpuinfo_level level[CPUINFO_LVL_MAX];
struct cpuinfo_node nodes[];
};
static struct cpuinfo_tree *cpuinfo_tree;
static u16 cpu_distribution_map[NR_CPUS];
static DEFINE_SPINLOCK(cpu_map_lock);
/* Niagara optimized cpuinfo tree traversal. */
static const int niagara_iterate_method[] = {
[CPUINFO_LVL_ROOT] = ROVER_NO_OP,
/* Strands (or virtual CPUs) within a core may not run concurrently
* on the Niagara, as instruction pipeline(s) are shared. Distribute
* work to strands in different cores first for better concurrency.
* Go to next NUMA node when all cores are used.
*/
[CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
/* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
* a proc_id represents an instruction pipeline. Distribute work to
* strands in different proc_id groups if the core has multiple
* instruction pipelines (e.g. the Niagara 2/2+ has two).
*/
[CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,
/* Pick the next strand in the proc_id group. */
[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
};
/* Generic cpuinfo tree traversal. Distribute work round robin across NUMA
* nodes.
*/
static const int generic_iterate_method[] = {
[CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
[CPUINFO_LVL_NODE] = ROVER_NO_OP,
[CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
};
static int cpuinfo_id(int cpu, int level)
{
int id;
switch (level) {
case CPUINFO_LVL_ROOT:
id = 0;
break;
case CPUINFO_LVL_NODE:
id = cpu_to_node(cpu);
break;
case CPUINFO_LVL_CORE:
id = cpu_data(cpu).core_id;
break;
case CPUINFO_LVL_PROC:
id = cpu_data(cpu).proc_id;
break;
default:
id = -EINVAL;
}
return id;
}
/*
* Enumerate the CPU information in __cpu_data to determine the start index,
* end index, and number of nodes for each level in the cpuinfo tree. The
* total number of cpuinfo nodes required to build the tree is returned.
*/
static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
{
int prev_id[CPUINFO_LVL_MAX];
int i, n, num_nodes;
for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
struct cpuinfo_level *lv = &tree_level[i];
prev_id[i] = -1;
lv->start_index = lv->end_index = lv->num_nodes = 0;
}
num_nodes = 1; /* Include the root node */
for (i = 0; i < num_possible_cpus(); i++) {
if (!cpu_online(i))
continue;
n = cpuinfo_id(i, CPUINFO_LVL_NODE);
if (n > prev_id[CPUINFO_LVL_NODE]) {
tree_level[CPUINFO_LVL_NODE].num_nodes++;
prev_id[CPUINFO_LVL_NODE] = n;
num_nodes++;
}
n = cpuinfo_id(i, CPUINFO_LVL_CORE);
if (n > prev_id[CPUINFO_LVL_CORE]) {
tree_level[CPUINFO_LVL_CORE].num_nodes++;
prev_id[CPUINFO_LVL_CORE] = n;
num_nodes++;
}
n = cpuinfo_id(i, CPUINFO_LVL_PROC);
if (n > prev_id[CPUINFO_LVL_PROC]) {
tree_level[CPUINFO_LVL_PROC].num_nodes++;
prev_id[CPUINFO_LVL_PROC] = n;
num_nodes++;
}
}
tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
n = tree_level[CPUINFO_LVL_NODE].num_nodes;
tree_level[CPUINFO_LVL_NODE].start_index = 1;
tree_level[CPUINFO_LVL_NODE].end_index = n;
n++;
tree_level[CPUINFO_LVL_CORE].start_index = n;
n += tree_level[CPUINFO_LVL_CORE].num_nodes;
tree_level[CPUINFO_LVL_CORE].end_index = n - 1;
tree_level[CPUINFO_LVL_PROC].start_index = n;
n += tree_level[CPUINFO_LVL_PROC].num_nodes;
tree_level[CPUINFO_LVL_PROC].end_index = n - 1;
return num_nodes;
}
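/*
 * Worked example (not part of the original file): on a machine with
 * 2 NUMA nodes, 4 cores and 8 proc_id groups, enumerate_cpuinfo_nodes()
 * returns 1 + 2 + 4 + 8 == 15 nodes, laid out back to back:
 *
 *	ROOT	start_index 0	end_index 0	(the implicit root)
 *	NODE	start_index 1	end_index 2
 *	CORE	start_index 3	end_index 6
 *	PROC	start_index 7	end_index 14
 */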
/* Build a tree representation of the CPU hierarchy using the per CPU
* information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are
* assumed to be sorted in ascending order based on node, core_id, and
* proc_id (in order of significance).
*/
static struct cpuinfo_tree *build_cpuinfo_tree(void)
{
struct cpuinfo_tree *new_tree;
struct cpuinfo_node *node;
struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
int num_cpus[CPUINFO_LVL_MAX];
int level_rover[CPUINFO_LVL_MAX];
int prev_id[CPUINFO_LVL_MAX];
int n, id, cpu, prev_cpu, last_cpu, level;
n = enumerate_cpuinfo_nodes(tmp_level);
new_tree = kzalloc(struct_size(new_tree, nodes, n), GFP_ATOMIC);
if (!new_tree)
return NULL;
new_tree->total_nodes = n;
memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
prev_cpu = cpu = cpumask_first(cpu_online_mask);
/* Initialize all levels in the tree with the first CPU */
for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
n = new_tree->level[level].start_index;
level_rover[level] = n;
node = &new_tree->nodes[n];
id = cpuinfo_id(cpu, level);
if (unlikely(id < 0)) {
kfree(new_tree);
return NULL;
}
node->id = id;
node->level = level;
node->num_cpus = 1;
node->parent_index = (level > CPUINFO_LVL_ROOT)
? new_tree->level[level - 1].start_index : -1;
node->child_start = node->child_end = node->rover =
(level == CPUINFO_LVL_PROC)
? cpu : new_tree->level[level + 1].start_index;
prev_id[level] = node->id;
num_cpus[level] = 1;
}
for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
if (cpu_online(last_cpu))
break;
}
while (++cpu <= last_cpu) {
if (!cpu_online(cpu))
continue;
for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
level--) {
id = cpuinfo_id(cpu, level);
if (unlikely(id < 0)) {
kfree(new_tree);
return NULL;
}
if ((id != prev_id[level]) || (cpu == last_cpu)) {
prev_id[level] = id;
node = &new_tree->nodes[level_rover[level]];
node->num_cpus = num_cpus[level];
num_cpus[level] = 1;
if (cpu == last_cpu)
node->num_cpus++;
/* Connect tree node to parent */
if (level == CPUINFO_LVL_ROOT)
node->parent_index = -1;
else
node->parent_index =
level_rover[level - 1];
if (level == CPUINFO_LVL_PROC) {
node->child_end =
(cpu == last_cpu) ? cpu : prev_cpu;
} else {
node->child_end =
level_rover[level + 1] - 1;
}
/* Initialize the next node in the same level */
n = ++level_rover[level];
if (n <= new_tree->level[level].end_index) {
node = &new_tree->nodes[n];
node->id = id;
node->level = level;
/* Connect node to child */
node->child_start = node->child_end =
node->rover =
(level == CPUINFO_LVL_PROC)
? cpu : level_rover[level + 1];
}
} else
num_cpus[level]++;
}
prev_cpu = cpu;
}
return new_tree;
}
static void increment_rover(struct cpuinfo_tree *t, int node_index,
int root_index, const int *rover_inc_table)
{
struct cpuinfo_node *node = &t->nodes[node_index];
int top_level, level;
top_level = t->nodes[root_index].level;
for (level = node->level; level >= top_level; level--) {
node->rover++;
if (node->rover <= node->child_end)
return;
node->rover = node->child_start;
/* If parent's rover does not need to be adjusted, stop here. */
if ((level == top_level) ||
!(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
return;
node = &t->nodes[node->parent_index];
}
}
static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
{
const int *rover_inc_table;
int level, new_index, index = root_index;
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
case SUN4V_CHIP_NIAGARA2:
case SUN4V_CHIP_NIAGARA3:
case SUN4V_CHIP_NIAGARA4:
case SUN4V_CHIP_NIAGARA5:
case SUN4V_CHIP_SPARC_M6:
case SUN4V_CHIP_SPARC_M7:
case SUN4V_CHIP_SPARC_M8:
case SUN4V_CHIP_SPARC_SN:
case SUN4V_CHIP_SPARC64X:
rover_inc_table = niagara_iterate_method;
break;
default:
rover_inc_table = generic_iterate_method;
}
for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
level++) {
new_index = t->nodes[index].rover;
if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
increment_rover(t, index, root_index, rover_inc_table);
index = new_index;
}
return index;
}
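/*
 * Worked example (not part of the original file): with one NUMA node
 * holding two single-pipeline cores (core A: cpus 0,1; core B: cpus
 * 2,3), the Niagara method increments the NODE-level rover on every
 * visit, so successive iterate_cpu() calls alternate cores before
 * reusing an instruction pipeline:
 *
 *	call 1 -> cpu 0		call 3 -> cpu 1
 *	call 2 -> cpu 2		call 4 -> cpu 3
 */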
static void _cpu_map_rebuild(void)
{
int i;
if (cpuinfo_tree) {
kfree(cpuinfo_tree);
cpuinfo_tree = NULL;
}
cpuinfo_tree = build_cpuinfo_tree();
if (!cpuinfo_tree)
return;
/* Build CPU distribution map that spans all online CPUs. No need
* to check if the CPU is online, as that is done when the cpuinfo
* tree is being built.
*/
for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
}
/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear
* round robin.
*/
static int simple_map_to_cpu(unsigned int index)
{
int i, end, cpu_rover;
cpu_rover = 0;
end = index % num_online_cpus();
for (i = 0; i < num_possible_cpus(); i++) {
if (cpu_online(cpu_rover)) {
if (cpu_rover >= end)
return cpu_rover;
cpu_rover++;
}
}
/* Impossible, since num_online_cpus() <= num_possible_cpus() */
return cpumask_first(cpu_online_mask);
}
static int _map_to_cpu(unsigned int index)
{
struct cpuinfo_node *root_node;
if (unlikely(!cpuinfo_tree)) {
_cpu_map_rebuild();
if (!cpuinfo_tree)
return simple_map_to_cpu(index);
}
root_node = &cpuinfo_tree->nodes[0];
#ifdef CONFIG_HOTPLUG_CPU
if (unlikely(root_node->num_cpus != num_online_cpus())) {
_cpu_map_rebuild();
if (!cpuinfo_tree)
return simple_map_to_cpu(index);
}
#endif
return cpu_distribution_map[index % root_node->num_cpus];
}
int map_to_cpu(unsigned int index)
{
int mapped_cpu;
unsigned long flag;
spin_lock_irqsave(&cpu_map_lock, flag);
mapped_cpu = _map_to_cpu(index);
#ifdef CONFIG_HOTPLUG_CPU
while (unlikely(!cpu_online(mapped_cpu)))
mapped_cpu = _map_to_cpu(index);
#endif
spin_unlock_irqrestore(&cpu_map_lock, flag);
return mapped_cpu;
}
EXPORT_SYMBOL(map_to_cpu);
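/*
 * Usage sketch (not part of the original file; struct my_queue and
 * bind_queues() are hypothetical): a driver spreading per-queue work
 * across the machine can hand map_to_cpu() consecutive indices:
 *
 *	static void bind_queues(struct my_queue *q, int nr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nr; i++)
 *			q[i].cpu = map_to_cpu(i);
 *	}
 */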
void cpu_map_rebuild(void)
{
unsigned long flag;
spin_lock_irqsave(&cpu_map_lock, flag);
_cpu_map_rebuild();
spin_unlock_irqrestore(&cpu_map_lock, flag);
}
| linux-master | arch/sparc/kernel/cpumap.c |
// SPDX-License-Identifier: GPL-2.0-only
/* ptrace.c: Sparc process tracing support.
*
* Copyright (C) 1996, 2008 David S. Miller ([email protected])
* Copyright (C) 1997 Jakub Jelinek ([email protected])
*
* Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
* and David Mosberger.
*
* Added Linux support -miguel (weird, eh?, the original code was meant
* to emulate SunOS).
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/context_tracking.h>
#include <asm/asi.h>
#include <linux/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
#include "entry.h"
/* #define ALLOW_INIT_TRACING */
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(n, r) \
{.name = n, .offset = (PT_V9_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME("g0", G0),
REG_OFFSET_NAME("g1", G1),
REG_OFFSET_NAME("g2", G2),
REG_OFFSET_NAME("g3", G3),
REG_OFFSET_NAME("g4", G4),
REG_OFFSET_NAME("g5", G5),
REG_OFFSET_NAME("g6", G6),
REG_OFFSET_NAME("g7", G7),
REG_OFFSET_NAME("i0", I0),
REG_OFFSET_NAME("i1", I1),
REG_OFFSET_NAME("i2", I2),
REG_OFFSET_NAME("i3", I3),
REG_OFFSET_NAME("i4", I4),
REG_OFFSET_NAME("i5", I5),
REG_OFFSET_NAME("i6", I6),
REG_OFFSET_NAME("i7", I7),
REG_OFFSET_NAME("tstate", TSTATE),
REG_OFFSET_NAME("pc", TPC),
REG_OFFSET_NAME("npc", TNPC),
REG_OFFSET_NAME("y", Y),
REG_OFFSET_NAME("lr", I7),
REG_OFFSET_END,
};
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* nothing to do */
}
/* To get the necessary page struct, access_process_vm() first calls
* get_user_pages(). This has done a flush_dcache_page() on the
* accessed page. Then our caller (copy_{to,from}_user_page()) did
 * a memcpy to read/write the data from that page.
*
* Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
* has been created
* 2) flush the I-cache if this is pre-cheetah and we did a write
*/
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len, int write)
{
BUG_ON(len > PAGE_SIZE);
if (tlb_type == hypervisor)
return;
preempt_disable();
#ifdef DCACHE_ALIASING_POSSIBLE
/* If bit 13 of the kernel address we used to access the
* user page is the same as the virtual address that page
* is mapped to in the user's address space, we can skip the
* D-cache flush.
*/
if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
unsigned long start = __pa(kaddr);
unsigned long end = start + len;
unsigned long dcache_line_size;
dcache_line_size = local_cpu_data().dcache_line_size;
if (tlb_type == spitfire) {
for (; start < end; start += dcache_line_size)
spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
} else {
start &= ~(dcache_line_size - 1);
for (; start < end; start += dcache_line_size)
__asm__ __volatile__(
"stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (start),
"i" (ASI_DCACHE_INVALIDATE));
}
}
#endif
if (write && tlb_type == spitfire) {
unsigned long start = (unsigned long) kaddr;
unsigned long end = start + len;
unsigned long icache_line_size;
icache_line_size = local_cpu_data().icache_line_size;
for (; start < end; start += icache_line_size)
flushi(start);
}
preempt_enable();
}
EXPORT_SYMBOL_GPL(flush_ptrace_access);
static int get_from_target(struct task_struct *target, unsigned long uaddr,
void *kbuf, int len)
{
if (target == current) {
if (copy_from_user(kbuf, (void __user *) uaddr, len))
return -EFAULT;
} else {
int len2 = access_process_vm(target, uaddr, kbuf, len,
FOLL_FORCE);
if (len2 != len)
return -EFAULT;
}
return 0;
}
static int set_to_target(struct task_struct *target, unsigned long uaddr,
void *kbuf, int len)
{
if (target == current) {
if (copy_to_user((void __user *) uaddr, kbuf, len))
return -EFAULT;
} else {
int len2 = access_process_vm(target, uaddr, kbuf, len,
FOLL_FORCE | FOLL_WRITE);
if (len2 != len)
return -EFAULT;
}
return 0;
}
static int regwindow64_get(struct task_struct *target,
const struct pt_regs *regs,
struct reg_window *wbuf)
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;
if (get_from_target(target, rw_addr, &win32, sizeof(win32)))
return -EFAULT;
for (i = 0; i < 8; i++)
wbuf->locals[i] = win32.locals[i];
for (i = 0; i < 8; i++)
wbuf->ins[i] = win32.ins[i];
} else {
rw_addr += STACK_BIAS;
if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf)))
return -EFAULT;
}
return 0;
}
static int regwindow64_set(struct task_struct *target,
const struct pt_regs *regs,
struct reg_window *wbuf)
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;
for (i = 0; i < 8; i++)
win32.locals[i] = wbuf->locals[i];
for (i = 0; i < 8; i++)
win32.ins[i] = wbuf->ins[i];
if (set_to_target(target, rw_addr, &win32, sizeof(win32)))
return -EFAULT;
} else {
rw_addr += STACK_BIAS;
if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf)))
return -EFAULT;
}
return 0;
}
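/*
 * Illustrative note (not part of the original file): 64-bit SPARC
 * processes run with a biased stack pointer, so the register window
 * lives not at %fp itself but at %fp + STACK_BIAS (2047), e.g.
 *
 *	u_regs[UREG_I6] == 0x7feffffe801	biased %fp
 *	window address  == 0x7feffffe801 + 2047 == 0x7fefffff000
 *
 * which is why both helpers above add STACK_BIAS only on the 64-bit
 * stack path.
 */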
enum sparc_regset {
REGSET_GENERAL,
REGSET_FP,
};
static int genregs64_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
struct reg_window window;
if (target == current)
flushw_user();
membuf_write(&to, regs->u_regs, 16 * sizeof(u64));
if (!to.left)
return 0;
if (regwindow64_get(target, regs, &window))
return -EFAULT;
membuf_write(&to, &window, 16 * sizeof(u64));
/* TSTATE, TPC, TNPC */
	membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
return membuf_store(&to, (u64)regs->y);
}
static int genregs64_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
if (target == current)
flushw_user();
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->u_regs,
0, 16 * sizeof(u64));
if (!ret && count && pos < (32 * sizeof(u64))) {
struct reg_window window;
if (regwindow64_get(target, regs, &window))
return -EFAULT;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&window,
16 * sizeof(u64),
32 * sizeof(u64));
if (!ret &&
regwindow64_set(target, regs, &window))
return -EFAULT;
}
if (!ret && count > 0) {
unsigned long tstate;
/* TSTATE */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&tstate,
32 * sizeof(u64),
33 * sizeof(u64));
if (!ret) {
/* Only the condition codes and the "in syscall"
* state can be modified in the %tstate register.
*/
tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
regs->tstate |= tstate;
}
}
if (!ret) {
/* TPC, TNPC */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->tpc,
33 * sizeof(u64),
35 * sizeof(u64));
}
if (!ret) {
unsigned long y = regs->y;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&y,
35 * sizeof(u64),
36 * sizeof(u64));
if (!ret)
regs->y = y;
}
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
36 * sizeof(u64), -1);
return ret;
}
static int fpregs64_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct thread_info *t = task_thread_info(target);
unsigned long fprs;
if (target == current)
save_and_clear_fpu();
fprs = t->fpsaved[0];
if (fprs & FPRS_DL)
membuf_write(&to, t->fpregs, 16 * sizeof(u64));
else
membuf_zero(&to, 16 * sizeof(u64));
if (fprs & FPRS_DU)
membuf_write(&to, t->fpregs + 16, 16 * sizeof(u64));
else
membuf_zero(&to, 16 * sizeof(u64));
if (fprs & FPRS_FEF) {
membuf_store(&to, t->xfsr[0]);
membuf_store(&to, t->gsr[0]);
} else {
membuf_zero(&to, 2 * sizeof(u64));
}
return membuf_store(&to, fprs);
}
static int fpregs64_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *fpregs = task_thread_info(target)->fpregs;
unsigned long fprs;
int ret;
if (target == current)
save_and_clear_fpu();
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
fpregs,
0, 32 * sizeof(u64));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
task_thread_info(target)->xfsr,
32 * sizeof(u64),
33 * sizeof(u64));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
task_thread_info(target)->gsr,
33 * sizeof(u64),
34 * sizeof(u64));
fprs = task_thread_info(target)->fpsaved[0];
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fprs,
34 * sizeof(u64),
35 * sizeof(u64));
}
fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
task_thread_info(target)->fpsaved[0] = fprs;
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
35 * sizeof(u64), -1);
return ret;
}
static const struct user_regset sparc64_regsets[] = {
/* Format is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* TSTATE, TPC, TNPC, Y
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = 36,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = genregs64_get, .set = genregs64_set
},
/* Format is:
* F0 --> F63
* FSR
* GSR
* FPRS
*/
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = 35,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = fpregs64_get, .set = fpregs64_set
},
};
static int getregs64_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
if (target == current)
flushw_user();
membuf_write(&to, regs->u_regs + 1, 15 * sizeof(u64));
membuf_store(&to, (u64)0);
	membuf_write(&to, &regs->tstate, 3 * sizeof(u64));
return membuf_store(&to, (u64)regs->y);
}
static int setregs64_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
unsigned long y = regs->y;
unsigned long tstate;
int ret;
if (target == current)
flushw_user();
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->u_regs + 1,
0 * sizeof(u64),
15 * sizeof(u64));
if (ret)
return ret;
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
15 * sizeof(u64), 16 * sizeof(u64));
/* TSTATE */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&tstate,
16 * sizeof(u64),
17 * sizeof(u64));
if (ret)
return ret;
/* Only the condition codes and the "in syscall"
* state can be modified in the %tstate register.
*/
tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
regs->tstate |= tstate;
/* TPC, TNPC */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->tpc,
17 * sizeof(u64),
19 * sizeof(u64));
if (ret)
return ret;
/* Y */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&y,
19 * sizeof(u64),
20 * sizeof(u64));
if (!ret)
regs->y = y;
return ret;
}
static const struct user_regset ptrace64_regsets[] = {
/* Format is:
* G1 --> G7
* O0 --> O7
* 0
* TSTATE, TPC, TNPC, Y
*/
[REGSET_GENERAL] = {
.n = 20, .size = sizeof(u64),
.regset_get = getregs64_get, .set = setregs64_set,
},
};
static const struct user_regset_view ptrace64_view = {
.regsets = ptrace64_regsets, .n = ARRAY_SIZE(ptrace64_regsets)
};
static const struct user_regset_view user_sparc64_view = {
.name = "sparc64", .e_machine = EM_SPARCV9,
.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};
#ifdef CONFIG_COMPAT
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
u32 uregs[16];
int i;
if (target == current)
flushw_user();
for (i = 0; i < 16; i++)
membuf_store(&to, (u32)regs->u_regs[i]);
if (!to.left)
return 0;
if (get_from_target(target, regs->u_regs[UREG_I6],
uregs, sizeof(uregs)))
return -EFAULT;
membuf_write(&to, uregs, 16 * sizeof(u32));
membuf_store(&to, (u32)tstate_to_psr(regs->tstate));
membuf_store(&to, (u32)(regs->tpc));
membuf_store(&to, (u32)(regs->tnpc));
membuf_store(&to, (u32)(regs->y));
return membuf_zero(&to, 2 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
compat_ulong_t __user *reg_window;
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
if (target == current)
flushw_user();
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf) {
for (; count > 0 && pos < 16; count--)
regs->u_regs[pos++] = *k++;
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
				if (put_user(*k++, &reg_window[pos++]))
return -EFAULT;
}
} else {
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
						      &reg_window[pos],
(void *) k,
sizeof(*k),
FOLL_FORCE | FOLL_WRITE)
!= sizeof(*k))
return -EFAULT;
k++;
pos++;
}
}
} else {
for (; count > 0 && pos < 16; count--) {
if (get_user(reg, u++))
return -EFAULT;
regs->u_regs[pos++] = reg;
}
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
				    put_user(reg, &reg_window[pos++]))
return -EFAULT;
}
} else {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
						      &reg_window[pos],
						      &reg, sizeof(reg),
FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
return -EFAULT;
pos++;
u++;
}
}
}
while (count > 0) {
unsigned long tstate;
if (kbuf)
reg = *k++;
else if (get_user(reg, u++))
return -EFAULT;
switch (pos) {
case 32: /* PSR */
tstate = regs->tstate;
tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
tstate |= psr_to_tstate_icc(reg);
if (reg & PSR_SYSCALL)
tstate |= TSTATE_SYSCALL;
regs->tstate = tstate;
break;
case 33: /* PC */
regs->tpc = reg;
break;
case 34: /* NPC */
regs->tnpc = reg;
break;
case 35: /* Y */
regs->y = reg;
break;
case 36: /* WIM */
case 37: /* TBR */
break;
default:
goto finish;
}
pos++;
count--;
}
finish:
pos *= sizeof(reg);
count *= sizeof(reg);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
38 * sizeof(reg), -1);
return 0;
}
static int fpregs32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct thread_info *t = task_thread_info(target);
bool enabled;
if (target == current)
save_and_clear_fpu();
enabled = t->fpsaved[0] & FPRS_FEF;
membuf_write(&to, t->fpregs, 32 * sizeof(u32));
membuf_zero(&to, sizeof(u32));
if (enabled)
membuf_store(&to, (u32)t->xfsr[0]);
else
membuf_zero(&to, sizeof(u32));
membuf_store(&to, (u32)((enabled << 8) | (8 << 16)));
return membuf_zero(&to, 64 * sizeof(u32));
}
static int fpregs32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *fpregs = task_thread_info(target)->fpregs;
unsigned long fprs;
int ret;
if (target == current)
save_and_clear_fpu();
fprs = task_thread_info(target)->fpsaved[0];
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
fpregs,
0, 32 * sizeof(u32));
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
32 * sizeof(u32),
33 * sizeof(u32));
if (!ret && count > 0) {
compat_ulong_t fsr;
unsigned long val;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fsr,
33 * sizeof(u32),
34 * sizeof(u32));
if (!ret) {
val = task_thread_info(target)->xfsr[0];
val &= 0xffffffff00000000UL;
val |= fsr;
task_thread_info(target)->xfsr[0] = val;
}
}
fprs |= (FPRS_FEF | FPRS_DL);
task_thread_info(target)->fpsaved[0] = fprs;
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
34 * sizeof(u32), -1);
return ret;
}
static const struct user_regset sparc32_regsets[] = {
/* Format is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = 38,
.size = sizeof(u32), .align = sizeof(u32),
.regset_get = genregs32_get, .set = genregs32_set
},
/* Format is:
* F0 --> F31
* empty 32-bit word
	 * FSR (32-bit word)
* FPU QUEUE COUNT (8-bit char)
* FPU QUEUE ENTRYSIZE (8-bit char)
* FPU ENABLED (8-bit char)
* empty 8-bit char
* FPU QUEUE (64 32-bit ints)
*/
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = 99,
.size = sizeof(u32), .align = sizeof(u32),
.regset_get = fpregs32_get, .set = fpregs32_set
},
};
static int getregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
int i;
if (target == current)
flushw_user();
membuf_store(&to, (u32)tstate_to_psr(regs->tstate));
membuf_store(&to, (u32)(regs->tpc));
membuf_store(&to, (u32)(regs->tnpc));
membuf_store(&to, (u32)(regs->y));
for (i = 1; i < 16; i++)
membuf_store(&to, (u32)regs->u_regs[i]);
return to.left;
}
static int setregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
unsigned long tstate;
u32 uregs[19];
int i, ret;
if (target == current)
flushw_user();
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
uregs,
0, 19 * sizeof(u32));
if (ret)
return ret;
tstate = regs->tstate;
tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
tstate |= psr_to_tstate_icc(uregs[0]);
if (uregs[0] & PSR_SYSCALL)
tstate |= TSTATE_SYSCALL;
regs->tstate = tstate;
regs->tpc = uregs[1];
regs->tnpc = uregs[2];
regs->y = uregs[3];
for (i = 1; i < 15; i++)
regs->u_regs[i] = uregs[3 + i];
return 0;
}
static int getfpregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct thread_info *t = task_thread_info(target);
if (target == current)
save_and_clear_fpu();
membuf_write(&to, t->fpregs, 32 * sizeof(u32));
if (t->fpsaved[0] & FPRS_FEF)
membuf_store(&to, (u32)t->xfsr[0]);
else
membuf_zero(&to, sizeof(u32));
return membuf_zero(&to, 35 * sizeof(u32));
}
static int setfpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *fpregs = task_thread_info(target)->fpregs;
unsigned long fprs;
int ret;
if (target == current)
save_and_clear_fpu();
fprs = task_thread_info(target)->fpsaved[0];
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
fpregs,
0, 32 * sizeof(u32));
if (!ret) {
compat_ulong_t fsr;
unsigned long val;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fsr,
32 * sizeof(u32),
33 * sizeof(u32));
if (!ret) {
val = task_thread_info(target)->xfsr[0];
val &= 0xffffffff00000000UL;
val |= fsr;
task_thread_info(target)->xfsr[0] = val;
}
}
fprs |= (FPRS_FEF | FPRS_DL);
task_thread_info(target)->fpsaved[0] = fprs;
return ret;
}
static const struct user_regset ptrace32_regsets[] = {
[REGSET_GENERAL] = {
.n = 19, .size = sizeof(u32),
.regset_get = getregs_get, .set = setregs_set,
},
[REGSET_FP] = {
.n = 68, .size = sizeof(u32),
.regset_get = getfpregs_get, .set = setfpregs_set,
},
};
static const struct user_regset_view ptrace32_view = {
.regsets = ptrace32_regsets, .n = ARRAY_SIZE(ptrace32_regsets)
};
static const struct user_regset_view user_sparc32_view = {
.name = "sparc", .e_machine = EM_SPARC,
.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_32BIT))
return &user_sparc32_view;
#endif
return &user_sparc64_view;
}
#ifdef CONFIG_COMPAT
struct compat_fps {
unsigned int regs[32];
unsigned int fsr;
unsigned int flags;
unsigned int extra;
unsigned int fpqd;
struct compat_fq {
unsigned int insnaddr;
unsigned int insn;
} fpq[16];
};
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4];
struct pt_regs32 __user *pregs;
struct compat_fps __user *fps;
unsigned long addr2 = caddr2;
unsigned long addr = caddr;
unsigned long data = cdata;
int ret;
pregs = (struct pt_regs32 __user *) addr;
fps = (struct compat_fps __user *) addr;
switch (request) {
case PTRACE_PEEKUSR:
ret = (addr != 0) ? -EIO : 0;
break;
case PTRACE_GETREGS:
ret = copy_regset_to_user(child, &ptrace32_view,
REGSET_GENERAL, 0,
19 * sizeof(u32),
pregs);
break;
case PTRACE_SETREGS:
ret = copy_regset_from_user(child, &ptrace32_view,
REGSET_GENERAL, 0,
19 * sizeof(u32),
pregs);
break;
case PTRACE_GETFPREGS:
ret = copy_regset_to_user(child, &ptrace32_view,
REGSET_FP, 0,
68 * sizeof(u32),
fps);
break;
case PTRACE_SETFPREGS:
ret = copy_regset_from_user(child, &ptrace32_view,
REGSET_FP, 0,
33 * sizeof(u32),
fps);
break;
case PTRACE_READTEXT:
case PTRACE_READDATA:
ret = ptrace_readdata(child, addr,
(char __user *)addr2, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
ret = -EIO;
break;
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
ret = ptrace_writedata(child, (char __user *) addr2,
addr, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
ret = -EIO;
break;
default:
if (request == PTRACE_SPARC_DETACH)
request = PTRACE_DETACH;
ret = compat_ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#endif /* CONFIG_COMPAT */
struct fps {
unsigned int regs[64];
unsigned long fsr;
};
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
const struct user_regset_view *view = task_user_regset_view(current);
unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
struct pt_regs __user *pregs;
struct fps __user *fps;
void __user *addr2p;
int ret;
pregs = (struct pt_regs __user *) addr;
fps = (struct fps __user *) addr;
addr2p = (void __user *) addr2;
switch (request) {
case PTRACE_PEEKUSR:
ret = (addr != 0) ? -EIO : 0;
break;
case PTRACE_GETREGS64:
ret = copy_regset_to_user(child, &ptrace64_view,
REGSET_GENERAL, 0,
19 * sizeof(u64),
pregs);
break;
case PTRACE_SETREGS64:
ret = copy_regset_from_user(child, &ptrace64_view,
REGSET_GENERAL, 0,
19 * sizeof(u64),
pregs);
break;
case PTRACE_GETFPREGS64:
ret = copy_regset_to_user(child, view, REGSET_FP,
0 * sizeof(u64),
33 * sizeof(u64),
fps);
break;
case PTRACE_SETFPREGS64:
ret = copy_regset_from_user(child, view, REGSET_FP,
0 * sizeof(u64),
33 * sizeof(u64),
fps);
break;
case PTRACE_READTEXT:
case PTRACE_READDATA:
ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
ret = -EIO;
break;
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
ret = -EIO;
break;
default:
if (request == PTRACE_SPARC_DETACH)
request = PTRACE_DETACH;
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
int ret = 0;
/* do the secure computing check first */
secure_computing_strict(regs->u_regs[UREG_G1]);
if (test_thread_flag(TIF_NOHZ))
user_exit();
if (test_thread_flag(TIF_SYSCALL_TRACE))
ret = ptrace_report_syscall_entry(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->u_regs[UREG_G1]);
audit_syscall_entry(regs->u_regs[UREG_G1], regs->u_regs[UREG_I0],
regs->u_regs[UREG_I1], regs->u_regs[UREG_I2],
regs->u_regs[UREG_I3]);
return ret;
}
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
if (test_thread_flag(TIF_NOHZ))
user_exit();
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_I0]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, 0);
if (test_thread_flag(TIF_NOHZ))
user_enter();
}
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
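/*
 * Usage sketch (not part of the original file): kprobe event parsing
 * turns a register name into a pt_regs offset and reads it back via
 * the regs_get_register() accessor (assumed here from asm/ptrace.h):
 *
 *	int off = regs_query_register_offset("i0");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */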
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
static inline int regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
return ((addr & ~(THREAD_SIZE - 1)) ==
(ksp & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long ksp = kernel_stack_pointer(regs) + STACK_BIAS;
unsigned long *addr = (unsigned long *)ksp;
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
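/*
 * Usage sketch (not part of the original file): a kprobe event fetch
 * argument such as "$stack2" boils down to
 *
 *	unsigned long val = regs_get_kernel_stack_nth(regs, 2);
 *
 * i.e. the third word on the (biased) kernel stack, or 0 if that slot
 * falls outside the THREAD_SIZE-aligned stack pages.
 */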
| linux-master | arch/sparc/kernel/ptrace_64.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Bit sliced AES using NEON instructions
*
* Copyright (C) 2016 - 2017 Linaro Ltd <[email protected]>
*/
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks);
asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
/* borrowed from aes-neon-blk.ko */
asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks);
asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 iv[]);
asmlinkage void neon_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int bytes, u8 ctr[]);
asmlinkage void neon_aes_xts_encrypt(u8 out[], u8 const in[],
u32 const rk1[], int rounds, int bytes,
u32 const rk2[], u8 iv[], int first);
asmlinkage void neon_aes_xts_decrypt(u8 out[], u8 const in[],
u32 const rk1[], int rounds, int bytes,
u32 const rk2[], u8 iv[], int first);
struct aesbs_ctx {
u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32];
int rounds;
} __aligned(AES_BLOCK_SIZE);
struct aesbs_cbc_ctr_ctx {
struct aesbs_ctx key;
u32 enc[AES_MAX_KEYLENGTH_U32];
};
struct aesbs_xts_ctx {
struct aesbs_ctx key;
u32 twkey[AES_MAX_KEYLENGTH_U32];
struct crypto_aes_ctx cts;
};
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_aes_ctx rk;
int err;
err = aes_expandkey(&rk, in_key, key_len);
if (err)
return err;
ctx->rounds = 6 + key_len / 4;
kernel_neon_begin();
aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
kernel_neon_end();
return 0;
}
static int __ecb_crypt(struct skcipher_request *req,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks))
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
if (walk.nbytes < walk.total)
blocks = round_down(blocks,
walk.stride / AES_BLOCK_SIZE);
kernel_neon_begin();
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
ctx->rounds, blocks);
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
return err;
}
static int ecb_encrypt(struct skcipher_request *req)
{
return __ecb_crypt(req, aesbs_ecb_encrypt);
}
static int ecb_decrypt(struct skcipher_request *req)
{
return __ecb_crypt(req, aesbs_ecb_decrypt);
}
static int aesbs_cbc_ctr_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_aes_ctx rk;
int err;
err = aes_expandkey(&rk, in_key, key_len);
if (err)
return err;
ctx->key.rounds = 6 + key_len / 4;
memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));
kernel_neon_begin();
aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
kernel_neon_end();
memzero_explicit(&rk, sizeof(rk));
return 0;
}
static int cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
/* fall back to the non-bitsliced NEON implementation */
kernel_neon_begin();
neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->enc, ctx->key.rounds, blocks,
walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
static int cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
if (walk.nbytes < walk.total)
blocks = round_down(blocks,
walk.stride / AES_BLOCK_SIZE);
kernel_neon_begin();
aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
ctx->key.rk, ctx->key.rounds, blocks,
walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
return err;
}
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
int nbytes = walk.nbytes % (8 * AES_BLOCK_SIZE);
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
kernel_neon_begin();
if (blocks >= 8) {
aesbs_ctr_encrypt(dst, src, ctx->key.rk, ctx->key.rounds,
blocks, walk.iv);
dst += blocks * AES_BLOCK_SIZE;
src += blocks * AES_BLOCK_SIZE;
}
if (nbytes && walk.nbytes == walk.total) {
neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
nbytes, walk.iv);
nbytes = 0;
}
kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
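/*
 * Worked example (not part of the original file) of the split above:
 * the bit-sliced core only handles multiples of 8 blocks, so for a
 * final walk of 100 bytes
 *
 *	blocks = (100 / 16) & ~7 == 0
 *	nbytes = 100 % 128       == 100
 *
 * nothing goes through aesbs_ctr_encrypt(); all 100 bytes (six full
 * blocks plus a 4-byte partial) are handled by the plain NEON
 * neon_aes_ctr_encrypt() fallback in one call.
 */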
static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_aes_ctx rk;
int err;
err = xts_verify_key(tfm, in_key, key_len);
if (err)
return err;
key_len /= 2;
err = aes_expandkey(&ctx->cts, in_key, key_len);
if (err)
return err;
err = aes_expandkey(&rk, in_key + key_len, key_len);
if (err)
return err;
memcpy(ctx->twkey, rk.key_enc, sizeof(ctx->twkey));
return aesbs_setkey(tfm, in_key, key_len);
}
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]))
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int tail = req->cryptlen % (8 * AES_BLOCK_SIZE);
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
struct scatterlist *src, *dst;
struct skcipher_walk walk;
int nbytes, err;
int first = 1;
u8 *out, *in;
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
/* ensure that the cts tail is covered by a single step */
if (unlikely(tail > 0 && tail < AES_BLOCK_SIZE)) {
int xts_blocks = DIV_ROUND_UP(req->cryptlen,
AES_BLOCK_SIZE) - 2;
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
xts_blocks * AES_BLOCK_SIZE,
req->iv);
req = &subreq;
} else {
tail = 0;
}
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
out = walk.dst.virt.addr;
in = walk.src.virt.addr;
nbytes = walk.nbytes;
kernel_neon_begin();
if (blocks >= 8) {
if (first == 1)
neon_aes_ecb_encrypt(walk.iv, walk.iv,
ctx->twkey,
ctx->key.rounds, 1);
first = 2;
fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,
walk.iv);
out += blocks * AES_BLOCK_SIZE;
in += blocks * AES_BLOCK_SIZE;
nbytes -= blocks * AES_BLOCK_SIZE;
}
if (walk.nbytes == walk.total && nbytes > 0) {
if (encrypt)
neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
ctx->key.rounds, nbytes,
ctx->twkey, walk.iv, first);
else
neon_aes_xts_decrypt(out, in, ctx->cts.key_dec,
ctx->key.rounds, nbytes,
ctx->twkey, walk.iv, first);
nbytes = first = 0;
}
kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
if (err || likely(!tail))
return err;
/* handle ciphertext stealing */
dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req->iv);
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
out = walk.dst.virt.addr;
in = walk.src.virt.addr;
nbytes = walk.nbytes;
kernel_neon_begin();
if (encrypt)
neon_aes_xts_encrypt(out, in, ctx->cts.key_enc, ctx->key.rounds,
nbytes, ctx->twkey, walk.iv, first);
else
neon_aes_xts_decrypt(out, in, ctx->cts.key_dec, ctx->key.rounds,
nbytes, ctx->twkey, walk.iv, first);
kernel_neon_end();
return skcipher_walk_done(&walk, 0);
}
static int xts_encrypt(struct skcipher_request *req)
{
return __xts_crypt(req, true, aesbs_xts_encrypt);
}
static int xts_decrypt(struct skcipher_request *req)
{
return __xts_crypt(req, false, aesbs_xts_decrypt);
}
static struct skcipher_alg aes_algs[] = { {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.walksize = 8 * AES_BLOCK_SIZE,
.setkey = aesbs_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctr_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.walksize = 8 * AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aesbs_cbc_ctr_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "ctr-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctr_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.chunksize = AES_BLOCK_SIZE,
.walksize = 8 * AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aesbs_cbc_ctr_setkey,
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "xts-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.walksize = 8 * AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aesbs_xts_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
} };
static void aes_exit(void)
{
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init aes_init(void)
{
if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
module_init(aes_init);
module_exit(aes_exit);
| linux-master | arch/arm64/crypto/aes-neonbs-glue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Glue code for POLYVAL using ARMv8 Crypto Extensions
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <[email protected]>
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <[email protected]>
* Copyright 2021 Google LLC
*/
/*
* Glue code based on ghash-clmulni-intel_glue.c.
*
 * This implementation of POLYVAL uses Montgomery multiplication accelerated by
* ARMv8 Crypto Extensions instructions to implement the finite field operations.
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/polyval.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/neon.h>
#include <asm/simd.h>
#define NUM_KEY_POWERS 8
struct polyval_tfm_ctx {
/*
* These powers must be in the order h^8, ..., h^1.
*/
u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE];
};
struct polyval_desc_ctx {
u8 buffer[POLYVAL_BLOCK_SIZE];
u32 bytes;
};
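/*
 * dctx->bytes counts the free space left in the partial-block buffer, so
 * "buffer + POLYVAL_BLOCK_SIZE - bytes" points at the first unused byte
 * when polyval_arm64_update() resumes mid-block.
 */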
asmlinkage void pmull_polyval_update(const struct polyval_tfm_ctx *keys,
const u8 *in, size_t nblocks, u8 *accumulator);
asmlinkage void pmull_polyval_mul(u8 *op1, const u8 *op2);
static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
const u8 *in, size_t nblocks, u8 *accumulator)
{
if (likely(crypto_simd_usable())) {
kernel_neon_begin();
pmull_polyval_update(keys, in, nblocks, accumulator);
kernel_neon_end();
} else {
polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in,
nblocks, accumulator);
}
}
static void internal_polyval_mul(u8 *op1, const u8 *op2)
{
if (likely(crypto_simd_usable())) {
kernel_neon_begin();
pmull_polyval_mul(op1, op2);
kernel_neon_end();
} else {
polyval_mul_non4k(op1, op2);
}
}
static int polyval_arm64_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm);
int i;
if (keylen != POLYVAL_BLOCK_SIZE)
return -EINVAL;
memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE);
for (i = NUM_KEY_POWERS-2; i >= 0; i--) {
memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE);
internal_polyval_mul(tctx->key_powers[i],
tctx->key_powers[i+1]);
}
return 0;
}
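/*
 * Precomputing h^1..h^8 lets the assembly fold up to eight blocks per
 * call: m_1*h^8 + m_2*h^7 + ... + m_8*h^1 can be evaluated as independent
 * carry-less multiplies with a single deferred reduction, instead of a
 * serial Horner chain of multiply-then-reduce steps.
 */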
static int polyval_arm64_init(struct shash_desc *desc)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int polyval_arm64_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
u8 *pos;
unsigned int nblocks;
unsigned int n;
if (dctx->bytes) {
n = min(srclen, dctx->bytes);
pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes;
dctx->bytes -= n;
srclen -= n;
while (n--)
*pos++ ^= *src++;
if (!dctx->bytes)
internal_polyval_mul(dctx->buffer,
tctx->key_powers[NUM_KEY_POWERS-1]);
}
while (srclen >= POLYVAL_BLOCK_SIZE) {
/* allow rescheduling every 4K bytes */
nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE;
internal_polyval_update(tctx, src, nblocks, dctx->buffer);
srclen -= nblocks * POLYVAL_BLOCK_SIZE;
src += nblocks * POLYVAL_BLOCK_SIZE;
}
if (srclen) {
dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
pos = dctx->buffer;
while (srclen--)
*pos++ ^= *src++;
}
return 0;
}
static int polyval_arm64_final(struct shash_desc *desc, u8 *dst)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
if (dctx->bytes) {
internal_polyval_mul(dctx->buffer,
tctx->key_powers[NUM_KEY_POWERS-1]);
}
memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE);
return 0;
}
static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_arm64_init,
.update = polyval_arm64_update,
.final = polyval_arm64_final,
.setkey = polyval_arm64_setkey,
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-ce",
.cra_priority = 200,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct polyval_tfm_ctx),
.cra_module = THIS_MODULE,
},
};
static int __init polyval_ce_mod_init(void)
{
return crypto_register_shash(&polyval_alg);
}
static void __exit polyval_ce_mod_exit(void)
{
crypto_unregister_shash(&polyval_alg);
}
module_cpu_feature_match(PMULL, polyval_ce_mod_init)
module_exit(polyval_ce_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("POLYVAL hash function accelerated by ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("polyval");
MODULE_ALIAS_CRYPTO("polyval-ce");
| linux-master | arch/arm64/crypto/polyval-ce-glue.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* SM4-GCM AEAD Algorithm using ARMv8 Crypto Extensions
* as specified in rfc8998
* https://datatracker.ietf.org/doc/html/rfc8998
*
* Copyright (C) 2022 Tianjia Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
#include <asm/neon.h>
#include <crypto/b128ops.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
#include "sm4-ce.h"
asmlinkage void sm4_ce_pmull_ghash_setup(const u32 *rkey_enc, u8 *ghash_table);
asmlinkage void pmull_ghash_update(const u8 *ghash_table, u8 *ghash,
const u8 *src, unsigned int nblocks);
asmlinkage void sm4_ce_pmull_gcm_enc(const u32 *rkey_enc, u8 *dst,
const u8 *src, u8 *iv,
unsigned int nbytes, u8 *ghash,
const u8 *ghash_table, const u8 *lengths);
asmlinkage void sm4_ce_pmull_gcm_dec(const u32 *rkey_enc, u8 *dst,
const u8 *src, u8 *iv,
unsigned int nbytes, u8 *ghash,
const u8 *ghash_table, const u8 *lengths);
#define GHASH_BLOCK_SIZE 16
#define GCM_IV_SIZE 12
struct sm4_gcm_ctx {
struct sm4_ctx key;
u8 ghash_table[16 * 4];
};
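/*
 * ghash_table caches key-dependent data for the PMULL-based GHASH; it is
 * derived from the encryption round keys by sm4_ce_pmull_ghash_setup()
 * in gcm_setkey() below.
 */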
static int gcm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm);
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
kernel_neon_begin();
sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
crypto_sm4_fk, crypto_sm4_ck);
sm4_ce_pmull_ghash_setup(ctx->key.rkey_enc, ctx->ghash_table);
kernel_neon_end();
return 0;
}
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
switch (authsize) {
case 4:
case 8:
case 12 ... 16:
return 0;
default:
return -EINVAL;
}
}
static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
u8 __aligned(8) buffer[GHASH_BLOCK_SIZE];
u32 assoclen = req->assoclen;
struct scatter_walk walk;
unsigned int buflen = 0;
scatterwalk_start(&walk, req->src);
do {
u32 n = scatterwalk_clamp(&walk, assoclen);
u8 *p, *ptr;
if (!n) {
scatterwalk_start(&walk, sg_next(walk.sg));
n = scatterwalk_clamp(&walk, assoclen);
}
p = ptr = scatterwalk_map(&walk);
assoclen -= n;
scatterwalk_advance(&walk, n);
if (n + buflen < GHASH_BLOCK_SIZE) {
memcpy(&buffer[buflen], ptr, n);
buflen += n;
} else {
unsigned int nblocks;
if (buflen) {
unsigned int l = GHASH_BLOCK_SIZE - buflen;
memcpy(&buffer[buflen], ptr, l);
ptr += l;
n -= l;
pmull_ghash_update(ctx->ghash_table, ghash,
buffer, 1);
}
nblocks = n / GHASH_BLOCK_SIZE;
if (nblocks) {
pmull_ghash_update(ctx->ghash_table, ghash,
ptr, nblocks);
ptr += nblocks * GHASH_BLOCK_SIZE;
}
buflen = n % GHASH_BLOCK_SIZE;
if (buflen)
memcpy(&buffer[0], ptr, buflen);
}
scatterwalk_unmap(p);
scatterwalk_done(&walk, 0, assoclen);
} while (assoclen);
	/* zero-pad the final partial block of associated data */
if (buflen) {
memset(&buffer[buflen], 0, GHASH_BLOCK_SIZE - buflen);
pmull_ghash_update(ctx->ghash_table, ghash, buffer, 1);
}
}
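/*
 * gcm_crypt() drives the whole GCM pass. The counter word starts at 2:
 * per GCM, IV || 1 is reserved for deriving the tag mask, and the first
 * data block uses IV || 2. Intermediate walk chunks pass a NULL lengths
 * pointer; only the final call receives the encoded bit lengths so the
 * assembly can fold them in and finalize the tag into ghash[].
 */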
static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
u8 ghash[], int err,
void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
u8 *dst, const u8 *src, u8 *iv,
unsigned int nbytes, u8 *ghash,
const u8 *ghash_table, const u8 *lengths))
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
u8 __aligned(8) iv[SM4_BLOCK_SIZE];
be128 __aligned(8) lengths;
memset(ghash, 0, SM4_BLOCK_SIZE);
lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(walk->total * 8);
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
kernel_neon_begin();
if (req->assoclen)
gcm_calculate_auth_mac(req, ghash);
while (walk->nbytes) {
unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
if (walk->nbytes == walk->total) {
sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
walk->nbytes, ghash,
ctx->ghash_table,
(const u8 *)&lengths);
kernel_neon_end();
return skcipher_walk_done(walk, 0);
}
sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
walk->nbytes - tail, ghash,
ctx->ghash_table, NULL);
kernel_neon_end();
err = skcipher_walk_done(walk, tail);
kernel_neon_begin();
}
sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
walk->nbytes, ghash, ctx->ghash_table,
(const u8 *)&lengths);
kernel_neon_end();
return err;
}
static int gcm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
struct skcipher_walk walk;
int err;
err = skcipher_walk_aead_encrypt(&walk, req, false);
err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_enc);
if (err)
return err;
/* copy authtag to end of dst */
scatterwalk_map_and_copy(ghash, req->dst, req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
return 0;
}
static int gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(aead);
u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
u8 authtag[SM4_BLOCK_SIZE];
struct skcipher_walk walk;
int err;
err = skcipher_walk_aead_decrypt(&walk, req, false);
err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_dec);
if (err)
return err;
/* compare calculated auth tag with the stored one */
scatterwalk_map_and_copy(authtag, req->src,
req->assoclen + req->cryptlen - authsize,
authsize, 0);
if (crypto_memneq(authtag, ghash, authsize))
return -EBADMSG;
return 0;
}
static struct aead_alg sm4_gcm_alg = {
.base = {
.cra_name = "gcm(sm4)",
.cra_driver_name = "gcm-sm4-ce",
.cra_priority = 400,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_gcm_ctx),
.cra_module = THIS_MODULE,
},
.ivsize = GCM_IV_SIZE,
.chunksize = SM4_BLOCK_SIZE,
.maxauthsize = SM4_BLOCK_SIZE,
.setkey = gcm_setkey,
.setauthsize = gcm_setauthsize,
.encrypt = gcm_encrypt,
.decrypt = gcm_decrypt,
};
static int __init sm4_ce_gcm_init(void)
{
if (!cpu_have_named_feature(PMULL))
return -ENODEV;
return crypto_register_aead(&sm4_gcm_alg);
}
static void __exit sm4_ce_gcm_exit(void)
{
crypto_unregister_aead(&sm4_gcm_alg);
}
static const struct cpu_feature __maybe_unused sm4_ce_gcm_cpu_feature[] = {
{ cpu_feature(PMULL) },
{}
};
MODULE_DEVICE_TABLE(cpu, sm4_ce_gcm_cpu_feature);
module_cpu_feature_match(SM4, sm4_ce_gcm_init);
module_exit(sm4_ce_gcm_exit);
MODULE_DESCRIPTION("Synchronous SM4 in GCM mode using ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("gcm(sm4)");
MODULE_AUTHOR("Tianjia Zhang <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | arch/arm64/crypto/sm4-ce-gcm-glue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
*
* Copyright (C) 2016 - 2017 Linaro Ltd <[email protected]>
*/
#include <linux/cpufeature.h>
#include <linux/crc-t10dif.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/neon.h>
#include <asm/simd.h>
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len);
asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
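/*
 * Two variants of the same folding algorithm: the _p64 version relies on
 * the 64x64->128-bit PMULL instruction from the Crypto Extensions, while
 * the _p8 version builds the wide multiply out of the baseline 8-bit
 * polynomial multiplies available on any ASIMD-capable CPU.
 */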
static int crct10dif_init(struct shash_desc *desc)
{
u16 *crc = shash_desc_ctx(desc);
*crc = 0;
return 0;
}
static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
do {
unsigned int chunk = length;
if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
chunk = SZ_4K;
kernel_neon_begin();
*crc = crc_t10dif_pmull_p8(*crc, data, chunk);
kernel_neon_end();
data += chunk;
length -= chunk;
} while (length);
} else {
*crc = crc_t10dif_generic(*crc, data, length);
}
return 0;
}
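/*
 * Both update variants cap each NEON section at roughly 4 KiB so that
 * preemption is not blocked for too long; the SZ_4K + CHUNK_SIZE test
 * guarantees the leftover piece never drops below the 16-byte minimum
 * the PMULL routines require.
 */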
static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
do {
unsigned int chunk = length;
if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
chunk = SZ_4K;
kernel_neon_begin();
*crc = crc_t10dif_pmull_p64(*crc, data, chunk);
kernel_neon_end();
data += chunk;
length -= chunk;
} while (length);
} else {
*crc = crc_t10dif_generic(*crc, data, length);
}
return 0;
}
static int crct10dif_final(struct shash_desc *desc, u8 *out)
{
u16 *crc = shash_desc_ctx(desc);
*(u16 *)out = *crc;
return 0;
}
static struct shash_alg crc_t10dif_alg[] = {{
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.init = crct10dif_init,
.update = crct10dif_update_pmull_p8,
.final = crct10dif_final,
.descsize = CRC_T10DIF_DIGEST_SIZE,
.base.cra_name = "crct10dif",
.base.cra_driver_name = "crct10dif-arm64-neon",
.base.cra_priority = 100,
.base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.init = crct10dif_init,
.update = crct10dif_update_pmull_p64,
.final = crct10dif_final,
.descsize = CRC_T10DIF_DIGEST_SIZE,
.base.cra_name = "crct10dif",
.base.cra_driver_name = "crct10dif-arm64-ce",
.base.cra_priority = 200,
.base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}};
static int __init crc_t10dif_mod_init(void)
{
if (cpu_have_named_feature(PMULL))
return crypto_register_shashes(crc_t10dif_alg,
ARRAY_SIZE(crc_t10dif_alg));
else
/* only register the first array element */
return crypto_register_shash(crc_t10dif_alg);
}
static void __exit crc_t10dif_mod_exit(void)
{
if (cpu_have_named_feature(PMULL))
crypto_unregister_shashes(crc_t10dif_alg,
ARRAY_SIZE(crc_t10dif_alg));
else
crypto_unregister_shash(crc_t10dif_alg);
}
module_cpu_feature_match(ASIMD, crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_exit);
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("crct10dif");
MODULE_ALIAS_CRYPTO("crct10dif-arm64-ce");
| linux-master | arch/arm64/crypto/crct10dif-ce-glue.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* SM4 Cipher Algorithm, using ARMv8 Crypto Extensions
* as specified in
* https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
*
* Copyright (C) 2022, Alibaba Group.
* Copyright (C) 2022 Tianjia Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/b128ops.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/sm4.h>
#define BYTES2BLKS(nbytes) ((nbytes) >> 4)
asmlinkage void sm4_ce_expand_key(const u8 *key, u32 *rkey_enc, u32 *rkey_dec,
const u32 *fk, const u32 *ck);
asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
unsigned int nblks);
asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblocks);
asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblocks);
asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_xts_enc(const u32 *rkey1, u8 *dst, const u8 *src,
u8 *tweak, unsigned int nbytes,
const u32 *rkey2_enc);
asmlinkage void sm4_ce_xts_dec(const u32 *rkey1, u8 *dst, const u8 *src,
u8 *tweak, unsigned int nbytes,
const u32 *rkey2_enc);
asmlinkage void sm4_ce_mac_update(const u32 *rkey_enc, u8 *digest,
const u8 *src, unsigned int nblocks,
bool enc_before, bool enc_after);
EXPORT_SYMBOL(sm4_ce_expand_key);
EXPORT_SYMBOL(sm4_ce_crypt_block);
EXPORT_SYMBOL(sm4_ce_cbc_enc);
EXPORT_SYMBOL(sm4_ce_cfb_enc);
struct sm4_xts_ctx {
struct sm4_ctx key1;
struct sm4_ctx key2;
};
struct sm4_mac_tfm_ctx {
struct sm4_ctx key;
u8 __aligned(8) consts[];
};
struct sm4_mac_desc_ctx {
unsigned int len;
u8 digest[SM4_BLOCK_SIZE];
};
static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
kernel_neon_begin();
sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec,
crypto_sm4_fk, crypto_sm4_ck);
kernel_neon_end();
return 0;
}
static int sm4_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
if (key_len != SM4_KEY_SIZE * 2)
return -EINVAL;
ret = xts_verify_key(tfm, key, key_len);
if (ret)
return ret;
kernel_neon_begin();
sm4_ce_expand_key(key, ctx->key1.rkey_enc,
ctx->key1.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
sm4_ce_expand_key(&key[SM4_KEY_SIZE], ctx->key2.rkey_enc,
ctx->key2.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
kernel_neon_end();
return 0;
}
static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
{
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
unsigned int nblks;
kernel_neon_begin();
nblks = BYTES2BLKS(nbytes);
if (nblks) {
sm4_ce_crypt(rkey, dst, src, nblks);
nbytes -= nblks * SM4_BLOCK_SIZE;
}
kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int sm4_ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return sm4_ecb_do_crypt(req, ctx->rkey_enc);
}
static int sm4_ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return sm4_ecb_do_crypt(req, ctx->rkey_dec);
}
static int sm4_cbc_crypt(struct skcipher_request *req,
struct sm4_ctx *ctx, bool encrypt)
{
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
unsigned int nblocks;
nblocks = nbytes / SM4_BLOCK_SIZE;
if (nblocks) {
kernel_neon_begin();
if (encrypt)
sm4_ce_cbc_enc(ctx->rkey_enc, dst, src,
walk.iv, nblocks);
else
sm4_ce_cbc_dec(ctx->rkey_dec, dst, src,
walk.iv, nblocks);
kernel_neon_end();
}
err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
}
return err;
}
static int sm4_cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return sm4_cbc_crypt(req, ctx, true);
}
static int sm4_cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
return sm4_cbc_crypt(req, ctx, false);
}
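/*
 * CBC with ciphertext stealing (CS3): everything up to the last two
 * blocks runs through the regular CBC path, then the assembly helpers
 * process the final full block plus the partial tail, including the
 * final-block swap that CS3 requires, so the output length matches the
 * input length.
 */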
static int sm4_cbc_cts_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
struct skcipher_walk walk;
int cbc_blocks;
int err;
if (req->cryptlen < SM4_BLOCK_SIZE)
return -EINVAL;
if (req->cryptlen == SM4_BLOCK_SIZE)
return sm4_cbc_crypt(req, ctx, encrypt);
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
NULL, NULL);
	/* handle the plain CBC part */
cbc_blocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2;
if (cbc_blocks) {
skcipher_request_set_crypt(&subreq, src, dst,
cbc_blocks * SM4_BLOCK_SIZE,
req->iv);
err = sm4_cbc_crypt(&subreq, ctx, encrypt);
if (err)
return err;
dst = src = scatterwalk_ffwd(sg_src, src, subreq.cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst,
subreq.cryptlen);
}
/* handle ciphertext stealing */
skcipher_request_set_crypt(&subreq, src, dst,
req->cryptlen - cbc_blocks * SM4_BLOCK_SIZE,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
kernel_neon_begin();
if (encrypt)
sm4_ce_cbc_cts_enc(ctx->rkey_enc, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, walk.nbytes);
else
sm4_ce_cbc_cts_dec(ctx->rkey_dec, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, walk.nbytes);
kernel_neon_end();
return skcipher_walk_done(&walk, 0);
}
static int sm4_cbc_cts_encrypt(struct skcipher_request *req)
{
return sm4_cbc_cts_crypt(req, true);
}
static int sm4_cbc_cts_decrypt(struct skcipher_request *req)
{
return sm4_cbc_cts_crypt(req, false);
}
static int sm4_cfb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
unsigned int nblks;
kernel_neon_begin();
nblks = BYTES2BLKS(nbytes);
if (nblks) {
sm4_ce_cfb_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
dst += nblks * SM4_BLOCK_SIZE;
src += nblks * SM4_BLOCK_SIZE;
nbytes -= nblks * SM4_BLOCK_SIZE;
}
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
u8 keystream[SM4_BLOCK_SIZE];
sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
crypto_xor_cpy(dst, src, keystream, nbytes);
nbytes = 0;
}
kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int sm4_cfb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
unsigned int nblks;
kernel_neon_begin();
nblks = BYTES2BLKS(nbytes);
if (nblks) {
sm4_ce_cfb_dec(ctx->rkey_enc, dst, src, walk.iv, nblks);
dst += nblks * SM4_BLOCK_SIZE;
src += nblks * SM4_BLOCK_SIZE;
nbytes -= nblks * SM4_BLOCK_SIZE;
}
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
u8 keystream[SM4_BLOCK_SIZE];
sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
crypto_xor_cpy(dst, src, keystream, nbytes);
nbytes = 0;
}
kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int sm4_ctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
unsigned int nblks;
kernel_neon_begin();
nblks = BYTES2BLKS(nbytes);
if (nblks) {
sm4_ce_ctr_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
dst += nblks * SM4_BLOCK_SIZE;
src += nblks * SM4_BLOCK_SIZE;
nbytes -= nblks * SM4_BLOCK_SIZE;
}
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
u8 keystream[SM4_BLOCK_SIZE];
sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv);
crypto_inc(walk.iv, SM4_BLOCK_SIZE);
crypto_xor_cpy(dst, src, keystream, nbytes);
nbytes = 0;
}
kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
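/*
 * XTS: rkey2_enc is only passed on the first call, so the assembly
 * encrypts the IV into the initial tweak exactly once; afterwards the
 * running tweak lives in walk.iv and rkey2_enc is set to NULL. A length
 * that is not block aligned is handled with a shortened subrequest
 * followed by a ciphertext-stealing step over the last full block plus
 * the tail.
 */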
static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int tail = req->cryptlen % SM4_BLOCK_SIZE;
const u32 *rkey2_enc = ctx->key2.rkey_enc;
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
struct scatterlist *src, *dst;
struct skcipher_walk walk;
unsigned int nbytes;
int err;
if (req->cryptlen < SM4_BLOCK_SIZE)
return -EINVAL;
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
int nblocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2;
skcipher_walk_abort(&walk);
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
nblocks * SM4_BLOCK_SIZE, req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
} else {
tail = 0;
}
while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
if (nbytes < walk.total)
nbytes &= ~(SM4_BLOCK_SIZE - 1);
kernel_neon_begin();
if (encrypt)
sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, nbytes,
rkey2_enc);
else
sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, nbytes,
rkey2_enc);
kernel_neon_end();
rkey2_enc = NULL;
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
if (err)
return err;
}
if (likely(tail == 0))
return 0;
/* handle ciphertext stealing */
dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
kernel_neon_begin();
if (encrypt)
sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, walk.nbytes,
rkey2_enc);
else
sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
walk.src.virt.addr, walk.iv, walk.nbytes,
rkey2_enc);
kernel_neon_end();
return skcipher_walk_done(&walk, 0);
}
static int sm4_xts_encrypt(struct skcipher_request *req)
{
return sm4_xts_crypt(req, true);
}
static int sm4_xts_decrypt(struct skcipher_request *req)
{
return sm4_xts_crypt(req, false);
}
static struct skcipher_alg sm4_algs[] = {
{
.base = {
.cra_name = "ecb(sm4)",
.cra_driver_name = "ecb-sm4-ce",
.cra_priority = 400,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.setkey = sm4_setkey,
.encrypt = sm4_ecb_encrypt,
.decrypt = sm4_ecb_decrypt,
}, {
.base = {
.cra_name = "cbc(sm4)",
.cra_driver_name = "cbc-sm4-ce",
.cra_priority = 400,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.setkey = sm4_setkey,
.encrypt = sm4_cbc_encrypt,
.decrypt = sm4_cbc_decrypt,
}, {
.base = {
.cra_name = "cfb(sm4)",
.cra_driver_name = "cfb-sm4-ce",
.cra_priority = 400,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.chunksize = SM4_BLOCK_SIZE,
.setkey = sm4_setkey,
.encrypt = sm4_cfb_encrypt,
.decrypt = sm4_cfb_decrypt,
}, {
.base = {
.cra_name = "ctr(sm4)",
.cra_driver_name = "ctr-sm4-ce",
.cra_priority = 400,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.chunksize = SM4_BLOCK_SIZE,
.setkey = sm4_setkey,
.encrypt = sm4_ctr_crypt,
.decrypt = sm4_ctr_crypt,
}, {
.base = {
.cra_name = "cts(cbc(sm4))",
.cra_driver_name = "cts-cbc-sm4-ce",
.cra_priority = 400,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE,
.max_keysize = SM4_KEY_SIZE,
.ivsize = SM4_BLOCK_SIZE,
.walksize = SM4_BLOCK_SIZE * 2,
.setkey = sm4_setkey,
.encrypt = sm4_cbc_cts_encrypt,
.decrypt = sm4_cbc_cts_decrypt,
}, {
.base = {
.cra_name = "xts(sm4)",
.cra_driver_name = "xts-sm4-ce",
.cra_priority = 400,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_xts_ctx),
.cra_module = THIS_MODULE,
},
.min_keysize = SM4_KEY_SIZE * 2,
.max_keysize = SM4_KEY_SIZE * 2,
.ivsize = SM4_BLOCK_SIZE,
.walksize = SM4_BLOCK_SIZE * 2,
.setkey = sm4_xts_setkey,
.encrypt = sm4_xts_encrypt,
.decrypt = sm4_xts_decrypt,
}
};
static int sm4_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
kernel_neon_begin();
sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
crypto_sm4_fk, crypto_sm4_ck);
kernel_neon_end();
return 0;
}
static int sm4_cmac_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
be128 *consts = (be128 *)ctx->consts;
u64 a, b;
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
memset(consts, 0, SM4_BLOCK_SIZE);
kernel_neon_begin();
sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
crypto_sm4_fk, crypto_sm4_ck);
/* encrypt the zero block */
sm4_ce_crypt_block(ctx->key.rkey_enc, (u8 *)consts, (const u8 *)consts);
kernel_neon_end();
/* gf(2^128) multiply zero-ciphertext with u and u^2 */
a = be64_to_cpu(consts[0].a);
b = be64_to_cpu(consts[0].b);
consts[0].a = cpu_to_be64((a << 1) | (b >> 63));
consts[0].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
a = be64_to_cpu(consts[0].a);
b = be64_to_cpu(consts[0].b);
consts[1].a = cpu_to_be64((a << 1) | (b >> 63));
consts[1].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
return 0;
}
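/*
 * XCBC (RFC 3566 construction) derives three keys by encrypting the
 * constants 0x01, 0x02 and 0x03 repeated across a block: the first
 * becomes the actual cipher key, and the other two are kept in
 * ctx->consts as the padding masks applied to the final block.
 */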
static int sm4_xcbc_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int key_len)
{
struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
u8 __aligned(8) key2[SM4_BLOCK_SIZE];
static u8 const ks[3][SM4_BLOCK_SIZE] = {
{ [0 ... SM4_BLOCK_SIZE - 1] = 0x1},
{ [0 ... SM4_BLOCK_SIZE - 1] = 0x2},
{ [0 ... SM4_BLOCK_SIZE - 1] = 0x3},
};
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
kernel_neon_begin();
sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
crypto_sm4_fk, crypto_sm4_ck);
sm4_ce_crypt_block(ctx->key.rkey_enc, key2, ks[0]);
sm4_ce_crypt(ctx->key.rkey_enc, ctx->consts, ks[1], 2);
sm4_ce_expand_key(key2, ctx->key.rkey_enc, ctx->key.rkey_dec,
crypto_sm4_fk, crypto_sm4_ck);
kernel_neon_end();
return 0;
}
static int sm4_mac_init(struct shash_desc *desc)
{
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
memset(ctx->digest, 0, SM4_BLOCK_SIZE);
ctx->len = 0;
return 0;
}
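/*
 * The update path below has one quirk worth noting: a buffered block is
 * not encrypted until more data arrives (ctx->len == SM4_BLOCK_SIZE
 * marks a full but still unencrypted block). Deferring the last block
 * this way lets the CMAC finalizer XOR in the proper subkey before the
 * final cipher call.
 */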
static int sm4_mac_update(struct shash_desc *desc, const u8 *p,
unsigned int len)
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
unsigned int l, nblocks;
if (len == 0)
return 0;
if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) {
l = min(len, SM4_BLOCK_SIZE - ctx->len);
crypto_xor(ctx->digest + ctx->len, p, l);
ctx->len += l;
len -= l;
p += l;
}
if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) {
kernel_neon_begin();
if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) {
sm4_ce_crypt_block(tctx->key.rkey_enc,
ctx->digest, ctx->digest);
ctx->len = 0;
} else {
nblocks = len / SM4_BLOCK_SIZE;
len %= SM4_BLOCK_SIZE;
sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,
nblocks, (ctx->len == SM4_BLOCK_SIZE),
(len != 0));
p += nblocks * SM4_BLOCK_SIZE;
if (len == 0)
ctx->len = SM4_BLOCK_SIZE;
}
kernel_neon_end();
if (len) {
crypto_xor(ctx->digest, p, len);
ctx->len = len;
}
}
return 0;
}
static int sm4_cmac_final(struct shash_desc *desc, u8 *out)
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
const u8 *consts = tctx->consts;
if (ctx->len != SM4_BLOCK_SIZE) {
ctx->digest[ctx->len] ^= 0x80;
consts += SM4_BLOCK_SIZE;
}
kernel_neon_begin();
sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1,
false, true);
kernel_neon_end();
memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
return 0;
}
static int sm4_cbcmac_final(struct shash_desc *desc, u8 *out)
{
struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
if (ctx->len) {
kernel_neon_begin();
sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest,
ctx->digest);
kernel_neon_end();
}
memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
return 0;
}
static struct shash_alg sm4_mac_algs[] = {
{
.base = {
.cra_name = "cmac(sm4)",
.cra_driver_name = "cmac-sm4-ce",
.cra_priority = 400,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ SM4_BLOCK_SIZE * 2,
.cra_module = THIS_MODULE,
},
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
.final = sm4_cmac_final,
.setkey = sm4_cmac_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}, {
.base = {
.cra_name = "xcbc(sm4)",
.cra_driver_name = "xcbc-sm4-ce",
.cra_priority = 400,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ SM4_BLOCK_SIZE * 2,
.cra_module = THIS_MODULE,
},
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
.final = sm4_cmac_final,
.setkey = sm4_xcbc_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}, {
.base = {
.cra_name = "cbcmac(sm4)",
.cra_driver_name = "cbcmac-sm4-ce",
.cra_priority = 400,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx),
.cra_module = THIS_MODULE,
},
.digestsize = SM4_BLOCK_SIZE,
.init = sm4_mac_init,
.update = sm4_mac_update,
.final = sm4_cbcmac_final,
.setkey = sm4_cbcmac_setkey,
.descsize = sizeof(struct sm4_mac_desc_ctx),
}
};
static int __init sm4_init(void)
{
int err;
err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
if (err)
return err;
err = crypto_register_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs));
if (err)
goto out_err;
return 0;
out_err:
crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
return err;
}
static void __exit sm4_exit(void)
{
crypto_unregister_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs));
crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
}
module_cpu_feature_match(SM4, sm4_init);
module_exit(sm4_exit);
MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("sm4-ce");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("ecb(sm4)");
MODULE_ALIAS_CRYPTO("cbc(sm4)");
MODULE_ALIAS_CRYPTO("cfb(sm4)");
MODULE_ALIAS_CRYPTO("ctr(sm4)");
MODULE_ALIAS_CRYPTO("cts(cbc(sm4))");
MODULE_ALIAS_CRYPTO("xts(sm4)");
MODULE_ALIAS_CRYPTO("cmac(sm4)");
MODULE_ALIAS_CRYPTO("xcbc(sm4)");
MODULE_ALIAS_CRYPTO("cbcmac(sm4)");
MODULE_AUTHOR("Tianjia Zhang <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | arch/arm64/crypto/sm4-ce-glue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Scalar AES core transform
*
* Copyright (C) 2017 Linaro Ltd <[email protected]>
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
static void aes_arm64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm64_encrypt(ctx->key_enc, out, in, rounds);
}
static void aes_arm64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int rounds = 6 + ctx->key_length / 4;
__aes_arm64_decrypt(ctx->key_dec, out, in, rounds);
}
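/*
 * 6 + key_length / 4 yields the standard AES round counts: 10, 12 and
 * 14 rounds for 128-, 192- and 256-bit keys respectively.
 */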
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-arm64",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
.cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
.cra_cipher.cia_setkey = crypto_aes_set_key,
.cra_cipher.cia_encrypt = aes_arm64_encrypt,
.cra_cipher.cia_decrypt = aes_arm64_decrypt
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Scalar AES cipher for arm64");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");
| linux-master | arch/arm64/crypto/aes-cipher-glue.c |