/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006, 07 MIPS Technologies, Inc.
* written by Ralf Baechle ([email protected])
*
* Copyright (C) 2008 Wind River Systems, Inc.
* updated by Tiejun Chen <[email protected]>
*
* 1. Probe driver for the Malta's UART ports:
*
* o 2 ports in the SMC SuperIO
* o 1 port in the CBUS UART, a discrete 16550 which normally is only used
* for bringups.
*
* We don't use 8250_platform.c on Malta as it would result in the CBUS
* UART becoming ttyS0.
*
* 2. Register RTC-CMOS platform device on Malta.
*/
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/maltaint.h>
#define SMC_PORT(base, int) \
{ \
.iobase = base, \
.irq = int, \
.uartclk = 1843200, \
.iotype = UPIO_PORT, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | \
UPF_MAGIC_MULTIPLIER, \
.regshift = 0, \
}
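/*
 * For reference, SMC_PORT(0x3F8, 4) as used just below expands to an
 * initializer of roughly this shape (a sketch of the macro expansion
 * above, shown for illustration only):
 *
 *	{
 *		.iobase   = 0x3F8,
 *		.irq      = 4,
 *		.uartclk  = 1843200,
 *		.iotype   = UPIO_PORT,
 *		.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 *			    UPF_MAGIC_MULTIPLIER,
 *		.regshift = 0,
 *	}
 */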
#define CBUS_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP)
static struct plat_serial8250_port uart8250_data[] = {
SMC_PORT(0x3F8, 4),
SMC_PORT(0x2F8, 3),
{
.mapbase = 0x1f000900, /* The CBUS UART */
.irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
.uartclk = 3686400, /* Twice the usual clk! */
.iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
UPIO_MEM32BE : UPIO_MEM32,
.flags = CBUS_UART_FLAGS,
.regshift = 3,
},
{ },
};
static struct platform_device malta_uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = uart8250_data,
},
};
static struct platform_device *malta_devices[] __initdata = {
&malta_uart8250_device,
};
static int __init malta_add_devices(void)
{
return platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
}
device_initcall(malta_add_devices);
| linux-master | arch/mips/mti-malta/malta-platform.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011-2016 Zhang, Keguang <[email protected]>
*/
#include <linux/leds.h>
#include <linux/mtd/partitions.h>
#include <linux/sizes.h>
#include <loongson1.h>
#include <dma.h>
#include <nand.h>
#include <platform.h>
static const struct gpio_led ls1x_gpio_leds[] __initconst = {
{
.name = "LED9",
.default_trigger = "heartbeat",
.gpio = 38,
.active_low = 1,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
}, {
.name = "LED6",
.default_trigger = "nand-disk",
.gpio = 39,
.active_low = 1,
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
};
static const struct gpio_led_platform_data ls1x_led_pdata __initconst = {
.num_leds = ARRAY_SIZE(ls1x_gpio_leds),
.leds = ls1x_gpio_leds,
};
static struct platform_device *ls1b_platform_devices[] __initdata = {
&ls1x_uart_pdev,
&ls1x_eth0_pdev,
&ls1x_eth1_pdev,
&ls1x_ehci_pdev,
&ls1x_gpio0_pdev,
&ls1x_gpio1_pdev,
&ls1x_rtc_pdev,
&ls1x_wdt_pdev,
};
static int __init ls1b_platform_init(void)
{
ls1x_serial_set_uartclk(&ls1x_uart_pdev);
gpio_led_register_device(-1, &ls1x_led_pdata);
return platform_add_devices(ls1b_platform_devices,
ARRAY_SIZE(ls1b_platform_devices));
}
arch_initcall(ls1b_platform_init);
| linux-master | arch/mips/loongson32/ls1b/board.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016 Yang Ling <[email protected]>
*/
#include <platform.h>
static struct platform_device *ls1c_platform_devices[] __initdata = {
&ls1x_uart_pdev,
&ls1x_eth0_pdev,
&ls1x_rtc_pdev,
&ls1x_wdt_pdev,
};
static int __init ls1c_platform_init(void)
{
ls1x_serial_set_uartclk(&ls1x_uart_pdev);
return platform_add_devices(ls1c_platform_devices,
ARRAY_SIZE(ls1c_platform_devices));
}
arch_initcall(ls1c_platform_init);
| linux-master | arch/mips/loongson32/ls1c/board.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 Zhang, Keguang <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <loongson1.h>
#include <irq.h>
#define LS1X_INTC_REG(n, x) \
((void __iomem *)KSEG1ADDR(LS1X_INTC_BASE + (n * 0x18) + (x)))
#define LS1X_INTC_INTISR(n) LS1X_INTC_REG(n, 0x0)
#define LS1X_INTC_INTIEN(n) LS1X_INTC_REG(n, 0x4)
#define LS1X_INTC_INTSET(n) LS1X_INTC_REG(n, 0x8)
#define LS1X_INTC_INTCLR(n) LS1X_INTC_REG(n, 0xc)
#define LS1X_INTC_INTPOL(n) LS1X_INTC_REG(n, 0x10)
#define LS1X_INTC_INTEDGE(n) LS1X_INTC_REG(n, 0x14)
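/*
 * Each controller bank occupies a 0x18-byte register window, so e.g.
 * LS1X_INTC_INTIEN(1) resolves to
 * (void __iomem *)KSEG1ADDR(LS1X_INTC_BASE + 0x18 + 0x4) -- a direct
 * expansion of the macros above, spelled out here for illustration.
 */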
static void ls1x_irq_ack(struct irq_data *d)
{
unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
__raw_writel(__raw_readl(LS1X_INTC_INTCLR(n))
| (1 << bit), LS1X_INTC_INTCLR(n));
}
static void ls1x_irq_mask(struct irq_data *d)
{
unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
__raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
& ~(1 << bit), LS1X_INTC_INTIEN(n));
}
static void ls1x_irq_mask_ack(struct irq_data *d)
{
unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
__raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
& ~(1 << bit), LS1X_INTC_INTIEN(n));
__raw_writel(__raw_readl(LS1X_INTC_INTCLR(n))
| (1 << bit), LS1X_INTC_INTCLR(n));
}
static void ls1x_irq_unmask(struct irq_data *d)
{
unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
__raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
| (1 << bit), LS1X_INTC_INTIEN(n));
}
static int ls1x_irq_settype(struct irq_data *d, unsigned int type)
{
unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
| (1 << bit), LS1X_INTC_INTPOL(n));
__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
& ~(1 << bit), LS1X_INTC_INTEDGE(n));
break;
case IRQ_TYPE_LEVEL_LOW:
__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
& ~(1 << bit), LS1X_INTC_INTPOL(n));
__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
& ~(1 << bit), LS1X_INTC_INTEDGE(n));
break;
case IRQ_TYPE_EDGE_RISING:
__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
| (1 << bit), LS1X_INTC_INTPOL(n));
__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
| (1 << bit), LS1X_INTC_INTEDGE(n));
break;
case IRQ_TYPE_EDGE_FALLING:
__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
& ~(1 << bit), LS1X_INTC_INTPOL(n));
__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
| (1 << bit), LS1X_INTC_INTEDGE(n));
break;
case IRQ_TYPE_EDGE_BOTH:
__raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
& ~(1 << bit), LS1X_INTC_INTPOL(n));
__raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
| (1 << bit), LS1X_INTC_INTEDGE(n));
break;
case IRQ_TYPE_NONE:
break;
default:
return -EINVAL;
}
return 0;
}
static struct irq_chip ls1x_irq_chip = {
.name = "LS1X-INTC",
.irq_ack = ls1x_irq_ack,
.irq_mask = ls1x_irq_mask,
.irq_mask_ack = ls1x_irq_mask_ack,
.irq_unmask = ls1x_irq_unmask,
.irq_set_type = ls1x_irq_settype,
};
static void ls1x_irq_dispatch(int n)
{
u32 int_status, irq;
/* Get pending sources, masked by current enables */
int_status = __raw_readl(LS1X_INTC_INTISR(n)) &
__raw_readl(LS1X_INTC_INTIEN(n));
if (int_status) {
irq = LS1X_IRQ(n, __ffs(int_status));
do_IRQ(irq);
}
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending;
pending = read_c0_cause() & read_c0_status() & ST0_IM;
if (pending & CAUSEF_IP7)
do_IRQ(TIMER_IRQ);
else if (pending & CAUSEF_IP2)
ls1x_irq_dispatch(0); /* INT0 */
else if (pending & CAUSEF_IP3)
ls1x_irq_dispatch(1); /* INT1 */
else if (pending & CAUSEF_IP4)
ls1x_irq_dispatch(2); /* INT2 */
else if (pending & CAUSEF_IP5)
ls1x_irq_dispatch(3); /* INT3 */
else if (pending & CAUSEF_IP6)
ls1x_irq_dispatch(4); /* INT4 */
else
spurious_interrupt();
}
static void __init ls1x_irq_init(int base)
{
int n;
/* Disable interrupts and clear pending,
* setup all IRQs as high level triggered
*/
for (n = 0; n < INTN; n++) {
__raw_writel(0x0, LS1X_INTC_INTIEN(n));
__raw_writel(0xffffffff, LS1X_INTC_INTCLR(n));
__raw_writel(0xffffffff, LS1X_INTC_INTPOL(n));
/* set DMA0, DMA1 and DMA2 to edge trigger */
__raw_writel(n ? 0x0 : 0xe000, LS1X_INTC_INTEDGE(n));
}
for (n = base; n < NR_IRQS; n++) {
irq_set_chip_and_handler(n, &ls1x_irq_chip,
handle_level_irq);
}
if (request_irq(INT0_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to request irq %d (cascade)\n", INT0_IRQ);
if (request_irq(INT1_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to request irq %d (cascade)\n", INT1_IRQ);
if (request_irq(INT2_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to request irq %d (cascade)\n", INT2_IRQ);
if (request_irq(INT3_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to request irq %d (cascade)\n", INT3_IRQ);
#if defined(CONFIG_LOONGSON1_LS1C)
if (request_irq(INT4_IRQ, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to request irq %d (cascade)\n", INT4_IRQ);
#endif
}
void __init arch_init_irq(void)
{
mips_cpu_irq_init();
ls1x_irq_init(LS1X_IRQ_BASE);
}
| linux-master | arch/mips/loongson32/common/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011-2016 Zhang, Keguang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/mtd/partitions.h>
#include <linux/sizes.h>
#include <linux/phy.h>
#include <linux/serial_8250.h>
#include <linux/stmmac.h>
#include <linux/usb/ehci_pdriver.h>
#include <platform.h>
#include <loongson1.h>
#include <dma.h>
#include <nand.h>
/* 8250/16550 compatible UART */
#define LS1X_UART(_id) \
{ \
.mapbase = LS1X_UART ## _id ## _BASE, \
.irq = LS1X_UART ## _id ## _IRQ, \
.iotype = UPIO_MEM, \
.flags = UPF_IOREMAP | UPF_FIXED_TYPE, \
.type = PORT_16550A, \
}
static struct plat_serial8250_port ls1x_serial8250_pdata[] = {
LS1X_UART(0),
LS1X_UART(1),
LS1X_UART(2),
LS1X_UART(3),
{},
};
struct platform_device ls1x_uart_pdev = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = ls1x_serial8250_pdata,
},
};
void __init ls1x_serial_set_uartclk(struct platform_device *pdev)
{
struct clk *clk;
struct plat_serial8250_port *p;
clk = clk_get(&pdev->dev, pdev->name);
if (IS_ERR(clk)) {
pr_err("unable to get %s clock, err=%ld",
pdev->name, PTR_ERR(clk));
return;
}
clk_prepare_enable(clk);
for (p = pdev->dev.platform_data; p->flags != 0; ++p)
p->uartclk = clk_get_rate(clk);
}
/* Synopsys Ethernet GMAC */
static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
.phy_mask = 0,
};
static struct stmmac_dma_cfg ls1x_eth_dma_cfg = {
.pbl = 1,
};
int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
{
struct plat_stmmacenet_data *plat_dat = NULL;
u32 val;
val = __raw_readl(LS1X_MUX_CTRL1);
#if defined(CONFIG_LOONGSON1_LS1B)
plat_dat = dev_get_platdata(&pdev->dev);
if (plat_dat->bus_id) {
__raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
GMAC1_USE_UART0, LS1X_MUX_CTRL0);
switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
case PHY_INTERFACE_MODE_MII:
val |= (GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
default:
pr_err("unsupported mii mode %d\n",
plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC1_SHUT;
} else {
switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
case PHY_INTERFACE_MODE_MII:
val |= (GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
default:
pr_err("unsupported mii mode %d\n",
plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC0_SHUT;
}
__raw_writel(val, LS1X_MUX_CTRL1);
#elif defined(CONFIG_LOONGSON1_LS1C)
plat_dat = dev_get_platdata(&pdev->dev);
val &= ~PHY_INTF_SELI;
if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
val |= 0x4 << PHY_INTF_SELI_SHIFT;
__raw_writel(val, LS1X_MUX_CTRL1);
val = __raw_readl(LS1X_MUX_CTRL0);
__raw_writel(val & (~GMAC_SHUT), LS1X_MUX_CTRL0);
#endif
return 0;
}
static struct plat_stmmacenet_data ls1x_eth0_pdata = {
.bus_id = 0,
.phy_addr = -1,
#if defined(CONFIG_LOONGSON1_LS1B)
.phy_interface = PHY_INTERFACE_MODE_MII,
#elif defined(CONFIG_LOONGSON1_LS1C)
.phy_interface = PHY_INTERFACE_MODE_RMII,
#endif
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
.tx_coe = 1,
.rx_queues_to_use = 1,
.tx_queues_to_use = 1,
.init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth0_resources[] = {
[0] = {
.start = LS1X_GMAC0_BASE,
.end = LS1X_GMAC0_BASE + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "macirq",
.start = LS1X_GMAC0_IRQ,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device ls1x_eth0_pdev = {
.name = "stmmaceth",
.id = 0,
.num_resources = ARRAY_SIZE(ls1x_eth0_resources),
.resource = ls1x_eth0_resources,
.dev = {
.platform_data = &ls1x_eth0_pdata,
},
};
#ifdef CONFIG_LOONGSON1_LS1B
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
.bus_id = 1,
.phy_addr = -1,
.phy_interface = PHY_INTERFACE_MODE_MII,
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
.tx_coe = 1,
.rx_queues_to_use = 1,
.tx_queues_to_use = 1,
.init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth1_resources[] = {
[0] = {
.start = LS1X_GMAC1_BASE,
.end = LS1X_GMAC1_BASE + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "macirq",
.start = LS1X_GMAC1_IRQ,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device ls1x_eth1_pdev = {
.name = "stmmaceth",
.id = 1,
.num_resources = ARRAY_SIZE(ls1x_eth1_resources),
.resource = ls1x_eth1_resources,
.dev = {
.platform_data = &ls1x_eth1_pdata,
},
};
#endif /* CONFIG_LOONGSON1_LS1B */
/* GPIO */
static struct resource ls1x_gpio0_resources[] = {
[0] = {
.start = LS1X_GPIO0_BASE,
.end = LS1X_GPIO0_BASE + SZ_4 - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device ls1x_gpio0_pdev = {
.name = "ls1x-gpio",
.id = 0,
.num_resources = ARRAY_SIZE(ls1x_gpio0_resources),
.resource = ls1x_gpio0_resources,
};
static struct resource ls1x_gpio1_resources[] = {
[0] = {
.start = LS1X_GPIO1_BASE,
.end = LS1X_GPIO1_BASE + SZ_4 - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device ls1x_gpio1_pdev = {
.name = "ls1x-gpio",
.id = 1,
.num_resources = ARRAY_SIZE(ls1x_gpio1_resources),
.resource = ls1x_gpio1_resources,
};
/* USB EHCI */
static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32);
static struct resource ls1x_ehci_resources[] = {
[0] = {
.start = LS1X_EHCI_BASE,
.end = LS1X_EHCI_BASE + SZ_32K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = LS1X_EHCI_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct usb_ehci_pdata ls1x_ehci_pdata = {
};
struct platform_device ls1x_ehci_pdev = {
.name = "ehci-platform",
.id = -1,
.num_resources = ARRAY_SIZE(ls1x_ehci_resources),
.resource = ls1x_ehci_resources,
.dev = {
.dma_mask = &ls1x_ehci_dmamask,
.platform_data = &ls1x_ehci_pdata,
},
};
/* Real Time Clock */
struct platform_device ls1x_rtc_pdev = {
.name = "ls1x-rtc",
.id = -1,
};
/* Watchdog */
static struct resource ls1x_wdt_resources[] = {
{
.start = LS1X_WDT_BASE,
.end = LS1X_WDT_BASE + SZ_16 - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device ls1x_wdt_pdev = {
.name = "ls1x-wdt",
.id = -1,
.num_resources = ARRAY_SIZE(ls1x_wdt_resources),
.resource = ls1x_wdt_resources,
};
| linux-master | arch/mips/loongson32/common/platform.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 Zhang, Keguang <[email protected]>
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/cpu-info.h>
#include <asm/bootinfo.h>
const char *get_system_type(void)
{
unsigned int processor_id = (&current_cpu_data)->processor_id;
switch (processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON1B:
#if defined(CONFIG_LOONGSON1_LS1B)
return "LOONGSON LS1B";
#elif defined(CONFIG_LOONGSON1_LS1C)
return "LOONGSON LS1C";
#endif
default:
return "LOONGSON (unknown)";
}
}
| linux-master | arch/mips/loongson32/common/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2014 Zhang, Keguang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <asm/time.h>
void __init plat_time_init(void)
{
struct clk *clk = NULL;
/* initialize LS1X clocks */
of_clk_init(NULL);
/* setup mips r4k timer */
clk = clk_get(NULL, "cpu_clk");
if (IS_ERR(clk))
panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
mips_hpt_frequency = clk_get_rate(clk) / 2;
}
| linux-master | arch/mips/loongson32/common/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 Zhang, Keguang <[email protected]>
*
* Modified from arch/mips/pnx833x/common/prom.c.
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/serial_reg.h>
#include <asm/fw/fw.h>
#include <loongson1.h>
unsigned long memsize;
void __init prom_init(void)
{
void __iomem *uart_base;
fw_init_cmdline();
memsize = fw_getenvl("memsize");
if (!memsize)
memsize = DEFAULT_MEMSIZE;
if (strstr(arcs_cmdline, "console=ttyS3"))
uart_base = ioremap(LS1X_UART3_BASE, 0x0f);
else if (strstr(arcs_cmdline, "console=ttyS2"))
uart_base = ioremap(LS1X_UART2_BASE, 0x0f);
else if (strstr(arcs_cmdline, "console=ttyS1"))
uart_base = ioremap(LS1X_UART1_BASE, 0x0f);
else
uart_base = ioremap(LS1X_UART0_BASE, 0x0f);
setup_8250_early_printk_port((unsigned long)uart_base, 0, 0);
}
void __init plat_mem_setup(void)
{
memblock_add(0x0, (memsize << 20));
}
| linux-master | arch/mips/loongson32/common/prom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2006 Ralf Baechle <[email protected]>
*/
#include <linux/dma-direct.h>
#include <asm/ip32/crime.h>
/*
* Few notes.
* 1. CPU sees memory as two chunks: 0-256M@0x0, and the rest @0x40000000+256M
* 2. PCI sees memory as one big chunk @0x0 (or we could use 0x40000000 for
* native-endian)
* 3. All other devices see memory as one big chunk at 0x40000000
* 4. Non-PCI devices will pass NULL as struct device*
*
* Thus we translate differently, depending on device.
*/
#define RAM_OFFSET_MASK 0x3fffffffUL
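/*
 * Worked example (illustrative sketch): a CPU physical address of
 * 0x40000000 + x is masked down to x by RAM_OFFSET_MASK; a PCI device
 * (dev != NULL) then uses DMA address x, while any other device gets
 * x + CRIME_HI_MEM_BASE, matching notes 2 and 3 above.
 */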
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK;
if (!dev)
dma_addr += CRIME_HI_MEM_BASE;
return dma_addr;
}
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK;
if (dma_addr >= 256*1024*1024)
paddr += CRIME_HI_MEM_BASE;
return paddr;
}
| linux-master | arch/mips/sgi-ip32/ip32-dma.c |
/*
* IP32 basic setup
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Harald Koerfgen
* Copyright (C) 2002, 2003, 2005 Ilya A. Volynets
* Copyright (C) 2006 Ralf Baechle <[email protected]>
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/param.h>
#include <linux/sched.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/sgialib.h>
#include <asm/time.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/ip32/crime.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
extern void ip32_be_init(void);
extern void crime_init(void);
#ifdef CONFIG_SGI_O2MACE_ETH
/*
* This is taken care of in here 'cause they say using Arc later on is
* problematic
*/
extern char o2meth_eaddr[8];
static inline unsigned char str2hexnum(unsigned char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
return 0; /* foo */
}
static inline void str2eaddr(unsigned char *ea, unsigned char *str)
{
int i;
for (i = 0; i < 6; i++) {
unsigned char num;
if (*str == ':')
str++;
num = str2hexnum(*str++) << 4;
num |= (str2hexnum(*str++));
ea[i] = num;
}
}
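/*
 * Example (sketch): str2eaddr(ea, "08:00:69:12:34:56") fills ea[0..5]
 * with the six octets; colons are skipped, and any non-hex character
 * parses as 0 via str2hexnum().
 */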
#endif
/* An arbitrary time; this can be decreased if reliability looks good */
#define WAIT_MS 10
void __init plat_time_init(void)
{
printk(KERN_INFO "Calibrating system timer... ");
write_c0_count(0);
crime->timer = 0;
while (crime->timer < CRIME_MASTER_FREQ * WAIT_MS / 1000) ;
mips_hpt_frequency = read_c0_count() * 1000 / WAIT_MS;
printk("%d MHz CPU detected\n", mips_hpt_frequency * 2 / 1000000);
}
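/*
 * Sketch of the calibration above: the CRIME timer ticks at
 * CRIME_MASTER_FREQ, so the busy-wait lasts WAIT_MS of wall time and
 * mips_hpt_frequency becomes the measured c0 count rate per second;
 * the "* 2" in the printk reflects c0_count incrementing once every
 * two CPU cycles.
 */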
void __init plat_mem_setup(void)
{
board_be_init = ip32_be_init;
#ifdef CONFIG_SGI_O2MACE_ETH
{
char *mac = ArcGetEnvironmentVariable("eaddr");
str2eaddr(o2meth_eaddr, mac);
}
#endif
#if defined(CONFIG_SERIAL_CORE_CONSOLE)
{
char *con = ArcGetEnvironmentVariable("console");
if (con && *con == 'd') {
static char options[8] __initdata;
char *baud = ArcGetEnvironmentVariable("dbaud");
if (baud)
strcpy(options, baud);
add_preferred_console("ttyS", *(con + 1) == '2' ? 1 : 0,
baud ? options : NULL);
}
}
#endif
}
| linux-master | arch/mips/sgi-ip32/ip32-setup.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
* Copyright (C) 1999, 2000 by Silicon Graphics
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/addrspace.h>
#include <asm/ptrace.h>
#include <asm/tlbdebug.h>
static int ip32_be_handler(struct pt_regs *regs, int is_fixup)
{
int data = regs->cp0_cause & 4;
if (is_fixup)
return MIPS_BE_FIXUP;
printk("Got %cbe at 0x%lx\n", data ? 'd' : 'i', regs->cp0_epc);
show_regs(regs);
dump_tlb_all();
while(1);
force_sig(SIGBUS);
}
void __init ip32_be_init(void)
{
mips_set_be_handler(ip32_be_handler);
}
| linux-master | arch/mips/sgi-ip32/ip32-berr.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2003 Keith M Wesolowski
* Copyright (C) 2005 Ilya A. Volynets <[email protected]>
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/ip32/crime.h>
#include <asm/ip32/mace.h>
struct sgi_crime __iomem *crime;
struct sgi_mace __iomem *mace;
EXPORT_SYMBOL_GPL(mace);
void __init crime_init(void)
{
unsigned int id, rev;
const int field = 2 * sizeof(unsigned long);
set_io_port_base((unsigned long) ioremap(MACEPCI_LOW_IO, 0x2000000));
crime = ioremap(CRIME_BASE, sizeof(struct sgi_crime));
mace = ioremap(MACE_BASE, sizeof(struct sgi_mace));
id = crime->id;
rev = id & CRIME_ID_REV;
id = (id & CRIME_ID_IDBITS) >> 4;
printk(KERN_INFO "CRIME id %1x rev %d at 0x%0*lx\n",
id, rev, field, (unsigned long) CRIME_BASE);
}
irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
{
unsigned long stat, addr;
int fatal = 0;
stat = crime->mem_error_stat & CRIME_MEM_ERROR_STAT_MASK;
addr = crime->mem_error_addr & CRIME_MEM_ERROR_ADDR_MASK;
printk("CRIME memory error at 0x%08lx ST 0x%08lx<", addr, stat);
if (stat & CRIME_MEM_ERROR_INV)
printk("INV,");
if (stat & CRIME_MEM_ERROR_ECC) {
unsigned long ecc_syn =
crime->mem_ecc_syn & CRIME_MEM_ERROR_ECC_SYN_MASK;
unsigned long ecc_gen =
crime->mem_ecc_chk & CRIME_MEM_ERROR_ECC_CHK_MASK;
printk("ECC,SYN=0x%08lx,GEN=0x%08lx,", ecc_syn, ecc_gen);
}
if (stat & CRIME_MEM_ERROR_MULTIPLE) {
fatal = 1;
printk("MULTIPLE,");
}
if (stat & CRIME_MEM_ERROR_HARD_ERR) {
fatal = 1;
printk("HARD,");
}
if (stat & CRIME_MEM_ERROR_SOFT_ERR)
printk("SOFT,");
if (stat & CRIME_MEM_ERROR_CPU_ACCESS)
printk("CPU,");
if (stat & CRIME_MEM_ERROR_VICE_ACCESS)
printk("VICE,");
if (stat & CRIME_MEM_ERROR_GBE_ACCESS)
printk("GBE,");
if (stat & CRIME_MEM_ERROR_RE_ACCESS)
printk("RE,REID=0x%02lx,", (stat & CRIME_MEM_ERROR_RE_ID)>>8);
if (stat & CRIME_MEM_ERROR_MACE_ACCESS)
printk("MACE,MACEID=0x%02lx,", stat & CRIME_MEM_ERROR_MACE_ID);
crime->mem_error_stat = 0;
if (fatal) {
printk("FATAL>\n");
panic("Fatal memory error.");
} else
printk("NONFATAL>\n");
return IRQ_HANDLED;
}
irqreturn_t crime_cpuerr_intr(unsigned int irq, void *dev_id)
{
unsigned long stat = crime->cpu_error_stat & CRIME_CPU_ERROR_MASK;
unsigned long addr = crime->cpu_error_addr & CRIME_CPU_ERROR_ADDR_MASK;
addr <<= 2;
printk("CRIME CPU error at 0x%09lx status 0x%08lx\n", addr, stat);
crime->cpu_error_stat = 0;
return IRQ_HANDLED;
}
| linux-master | arch/mips/sgi-ip32/crime.c |
/*
* Code to handle IP32 IRQs
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Harald Koerfgen
* Copyright (C) 2001 Keith M Wesolowski
*/
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/signal.h>
#include <asm/time.h>
#include <asm/ip32/crime.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
/* issue a PIO read to make sure no PIO writes are pending */
static inline void flush_crime_bus(void)
{
crime->control;
}
static inline void flush_mace_bus(void)
{
mace->perif.ctrl.misc;
}
/*
* O2 irq map
*
* IP0 -> software (ignored)
* IP1 -> software (ignored)
* IP2 -> (irq0) C crime 1.1 all interrupts; crime 1.5 ???
* IP3 -> (irq1) X unknown
* IP4 -> (irq2) X unknown
* IP5 -> (irq3) X unknown
* IP6 -> (irq4) X unknown
* IP7 -> (irq5) 7 CPU count/compare timer (system timer)
*
* crime: (C)
*
* CRIME_INT_STAT 31:0:
*
* 0 -> 8 Video in 1
* 1 -> 9 Video in 2
* 2 -> 10 Video out
* 3 -> 11 Mace ethernet
* 4 -> S SuperIO sub-interrupt
* 5 -> M Miscellaneous sub-interrupt
* 6 -> A Audio sub-interrupt
* 7 -> 15 PCI bridge errors
* 8 -> 16 PCI SCSI aic7xxx 0
* 9 -> 17 PCI SCSI aic7xxx 1
* 10 -> 18 PCI slot 0
* 11 -> 19 unused (PCI slot 1)
* 12 -> 20 unused (PCI slot 2)
* 13 -> 21 unused (PCI shared 0)
* 14 -> 22 unused (PCI shared 1)
* 15 -> 23 unused (PCI shared 2)
* 16 -> 24 GBE0 (E)
* 17 -> 25 GBE1 (E)
* 18 -> 26 GBE2 (E)
* 19 -> 27 GBE3 (E)
* 20 -> 28 CPU errors
* 21 -> 29 Memory errors
* 22 -> 30 RE empty edge (E)
* 23 -> 31 RE full edge (E)
* 24 -> 32 RE idle edge (E)
* 25 -> 33 RE empty level
* 26 -> 34 RE full level
* 27 -> 35 RE idle level
* 28 -> 36 unused (software 0) (E)
* 29 -> 37 unused (software 1) (E)
* 30 -> 38 unused (software 2) - crime 1.5 CPU SysCorError (E)
* 31 -> 39 VICE
*
* S, M, A: Use the MACE ISA interrupt register
* MACE_ISA_INT_STAT 31:0
*
* 0-7 -> 40-47 Audio
* 8 -> 48 RTC
* 9 -> 49 Keyboard
* 10 -> X Keyboard polled
* 11 -> 51 Mouse
* 12 -> X Mouse polled
* 13-15 -> 53-55 Count/compare timers
* 16-19 -> 56-59 Parallel (16 E)
* 20-25 -> 60-62 Serial 1 (22 E)
* 26-31 -> 66-71 Serial 2 (28 E)
*
* Note that this means IRQs 12-14, 50, and 52 do not exist. This is a
* different IRQ map than IRIX uses, but that's OK as Linux irq handling
* is quite different anyway.
*/
/* Some initial interrupts to set up */
extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
/*
* This is for pure CRIME interrupts - ie not MACE. The advantage?
* We get to split the register in half and do faster lookups.
*/
static uint64_t crime_mask;
static inline void crime_enable_irq(struct irq_data *d)
{
unsigned int bit = d->irq - CRIME_IRQ_BASE;
crime_mask |= 1 << bit;
crime->imask = crime_mask;
}
static inline void crime_disable_irq(struct irq_data *d)
{
unsigned int bit = d->irq - CRIME_IRQ_BASE;
crime_mask &= ~(1 << bit);
crime->imask = crime_mask;
flush_crime_bus();
}
static struct irq_chip crime_level_interrupt = {
.name = "IP32 CRIME",
.irq_mask = crime_disable_irq,
.irq_unmask = crime_enable_irq,
};
static void crime_edge_mask_and_ack_irq(struct irq_data *d)
{
unsigned int bit = d->irq - CRIME_IRQ_BASE;
uint64_t crime_int;
/* Edge triggered interrupts must be cleared. */
crime_int = crime->hard_int;
crime_int &= ~(1 << bit);
crime->hard_int = crime_int;
crime_disable_irq(d);
}
static struct irq_chip crime_edge_interrupt = {
.name = "IP32 CRIME",
.irq_ack = crime_edge_mask_and_ack_irq,
.irq_mask = crime_disable_irq,
.irq_mask_ack = crime_edge_mask_and_ack_irq,
.irq_unmask = crime_enable_irq,
};
/*
* This is for MACE PCI interrupts. We can decrease bus traffic by masking
* as close to the source as possible. This also means we can take the
* next chunk of the CRIME register in one piece.
*/
static unsigned long macepci_mask;
static void enable_macepci_irq(struct irq_data *d)
{
macepci_mask |= MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ);
mace->pci.control = macepci_mask;
crime_mask |= 1 << (d->irq - CRIME_IRQ_BASE);
crime->imask = crime_mask;
}
static void disable_macepci_irq(struct irq_data *d)
{
crime_mask &= ~(1 << (d->irq - CRIME_IRQ_BASE));
crime->imask = crime_mask;
flush_crime_bus();
macepci_mask &= ~MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ);
mace->pci.control = macepci_mask;
flush_mace_bus();
}
static struct irq_chip ip32_macepci_interrupt = {
.name = "IP32 MACE PCI",
.irq_mask = disable_macepci_irq,
.irq_unmask = enable_macepci_irq,
};
/* This is used for MACE ISA interrupts. That means bits 4-6 in the
* CRIME register.
*/
#define MACEISA_AUDIO_INT (MACEISA_AUDIO_SW_INT | \
MACEISA_AUDIO_SC_INT | \
MACEISA_AUDIO1_DMAT_INT | \
MACEISA_AUDIO1_OF_INT | \
MACEISA_AUDIO2_DMAT_INT | \
MACEISA_AUDIO2_MERR_INT | \
MACEISA_AUDIO3_DMAT_INT | \
MACEISA_AUDIO3_MERR_INT)
#define MACEISA_MISC_INT (MACEISA_RTC_INT | \
MACEISA_KEYB_INT | \
MACEISA_KEYB_POLL_INT | \
MACEISA_MOUSE_INT | \
MACEISA_MOUSE_POLL_INT | \
MACEISA_TIMER0_INT | \
MACEISA_TIMER1_INT | \
MACEISA_TIMER2_INT)
#define MACEISA_SUPERIO_INT (MACEISA_PARALLEL_INT | \
MACEISA_PAR_CTXA_INT | \
MACEISA_PAR_CTXB_INT | \
MACEISA_PAR_MERR_INT | \
MACEISA_SERIAL1_INT | \
MACEISA_SERIAL1_TDMAT_INT | \
MACEISA_SERIAL1_TDMAPR_INT | \
MACEISA_SERIAL1_TDMAME_INT | \
MACEISA_SERIAL1_RDMAT_INT | \
MACEISA_SERIAL1_RDMAOR_INT | \
MACEISA_SERIAL2_INT | \
MACEISA_SERIAL2_TDMAT_INT | \
MACEISA_SERIAL2_TDMAPR_INT | \
MACEISA_SERIAL2_TDMAME_INT | \
MACEISA_SERIAL2_RDMAT_INT | \
MACEISA_SERIAL2_RDMAOR_INT)
static unsigned long maceisa_mask;
static void enable_maceisa_irq(struct irq_data *d)
{
unsigned int crime_int = 0;
pr_debug("maceisa enable: %u\n", d->irq);
switch (d->irq) {
case MACEISA_AUDIO_SW_IRQ ... MACEISA_AUDIO3_MERR_IRQ:
crime_int = MACE_AUDIO_INT;
break;
case MACEISA_RTC_IRQ ... MACEISA_TIMER2_IRQ:
crime_int = MACE_MISC_INT;
break;
case MACEISA_PARALLEL_IRQ ... MACEISA_SERIAL2_RDMAOR_IRQ:
crime_int = MACE_SUPERIO_INT;
break;
}
pr_debug("crime_int %08x enabled\n", crime_int);
crime_mask |= crime_int;
crime->imask = crime_mask;
maceisa_mask |= 1 << (d->irq - MACEISA_AUDIO_SW_IRQ);
mace->perif.ctrl.imask = maceisa_mask;
}
static void disable_maceisa_irq(struct irq_data *d)
{
unsigned int crime_int = 0;
maceisa_mask &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
if (!(maceisa_mask & MACEISA_AUDIO_INT))
crime_int |= MACE_AUDIO_INT;
if (!(maceisa_mask & MACEISA_MISC_INT))
crime_int |= MACE_MISC_INT;
if (!(maceisa_mask & MACEISA_SUPERIO_INT))
crime_int |= MACE_SUPERIO_INT;
crime_mask &= ~crime_int;
crime->imask = crime_mask;
flush_crime_bus();
mace->perif.ctrl.imask = maceisa_mask;
flush_mace_bus();
}
static void mask_and_ack_maceisa_irq(struct irq_data *d)
{
unsigned long mace_int;
/* edge triggered */
mace_int = mace->perif.ctrl.istat;
mace_int &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
mace->perif.ctrl.istat = mace_int;
disable_maceisa_irq(d);
}
static struct irq_chip ip32_maceisa_level_interrupt = {
.name = "IP32 MACE ISA",
.irq_mask = disable_maceisa_irq,
.irq_unmask = enable_maceisa_irq,
};
static struct irq_chip ip32_maceisa_edge_interrupt = {
.name = "IP32 MACE ISA",
.irq_ack = mask_and_ack_maceisa_irq,
.irq_mask = disable_maceisa_irq,
.irq_mask_ack = mask_and_ack_maceisa_irq,
.irq_unmask = enable_maceisa_irq,
};
/* This is used for regular non-ISA, non-PCI MACE interrupts. That means
* bits 0-3 and 7 in the CRIME register.
*/
static void enable_mace_irq(struct irq_data *d)
{
unsigned int bit = d->irq - CRIME_IRQ_BASE;
crime_mask |= (1 << bit);
crime->imask = crime_mask;
}
static void disable_mace_irq(struct irq_data *d)
{
unsigned int bit = d->irq - CRIME_IRQ_BASE;
crime_mask &= ~(1 << bit);
crime->imask = crime_mask;
flush_crime_bus();
}
static struct irq_chip ip32_mace_interrupt = {
.name = "IP32 MACE",
.irq_mask = disable_mace_irq,
.irq_unmask = enable_mace_irq,
};
static void ip32_unknown_interrupt(void)
{
printk("Unknown interrupt occurred!\n");
printk("cp0_status: %08x\n", read_c0_status());
printk("cp0_cause: %08x\n", read_c0_cause());
printk("CRIME intr mask: %016lx\n", crime->imask);
printk("CRIME intr status: %016lx\n", crime->istat);
printk("CRIME hardware intr register: %016lx\n", crime->hard_int);
printk("MACE ISA intr mask: %08lx\n", mace->perif.ctrl.imask);
printk("MACE ISA intr status: %08lx\n", mace->perif.ctrl.istat);
printk("MACE PCI control register: %08x\n", mace->pci.control);
printk("Register dump:\n");
show_regs(get_irq_regs());
printk("Please mail this report to [email protected]\n");
printk("Spinning...");
while(1) ;
}
/* CRIME 1.1 appears to deliver all interrupts to this one pin. */
/* change this to loop over all edge-triggered irqs, exception masked out ones */
static void ip32_irq0(void)
{
uint64_t crime_int;
int irq = 0;
/*
* Sanity check interrupt numbering enum.
* MACE got 32 interrupts and there are 32 MACE ISA interrupts daisy
* chained.
*/
BUILD_BUG_ON(CRIME_VICE_IRQ - MACE_VID_IN1_IRQ != 31);
BUILD_BUG_ON(MACEISA_SERIAL2_RDMAOR_IRQ - MACEISA_AUDIO_SW_IRQ != 31);
crime_int = crime->istat & crime_mask;
/* crime sometime delivers spurious interrupts, ignore them */
if (unlikely(crime_int == 0))
return;
irq = MACE_VID_IN1_IRQ + __ffs(crime_int);
if (crime_int & CRIME_MACEISA_INT_MASK) {
unsigned long mace_int = mace->perif.ctrl.istat;
irq = __ffs(mace_int & maceisa_mask) + MACEISA_AUDIO_SW_IRQ;
}
pr_debug("*irq %u*\n", irq);
do_IRQ(irq);
}
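/*
 * Example (sketch): a pending MACE ISA source (say the keyboard) sets
 * one of CRIME bits 4-6 covered by CRIME_MACEISA_INT_MASK; the code
 * above then consults mace->perif.ctrl.istat and dispatches the precise
 * MACEISA_*_IRQ instead of the coarse CRIME cascade bit.
 */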
static void ip32_irq1(void)
{
ip32_unknown_interrupt();
}
static void ip32_irq2(void)
{
ip32_unknown_interrupt();
}
static void ip32_irq3(void)
{
ip32_unknown_interrupt();
}
static void ip32_irq4(void)
{
ip32_unknown_interrupt();
}
static void ip32_irq5(void)
{
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause();
if (likely(pending & IE_IRQ0))
ip32_irq0();
else if (unlikely(pending & IE_IRQ1))
ip32_irq1();
else if (unlikely(pending & IE_IRQ2))
ip32_irq2();
else if (unlikely(pending & IE_IRQ3))
ip32_irq3();
else if (unlikely(pending & IE_IRQ4))
ip32_irq4();
else if (likely(pending & IE_IRQ5))
ip32_irq5();
}
void __init arch_init_irq(void)
{
unsigned int irq;
/* Install our interrupt handler, then clear and disable all
* CRIME and MACE interrupts. */
crime->imask = 0;
crime->hard_int = 0;
crime->soft_int = 0;
mace->perif.ctrl.istat = 0;
mace->perif.ctrl.imask = 0;
mips_cpu_irq_init();
for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) {
switch (irq) {
case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ:
irq_set_chip_and_handler_name(irq,
&ip32_mace_interrupt,
handle_level_irq,
"level");
break;
case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ:
irq_set_chip_and_handler_name(irq,
&ip32_macepci_interrupt,
handle_level_irq,
"level");
break;
case CRIME_CPUERR_IRQ:
case CRIME_MEMERR_IRQ:
irq_set_chip_and_handler_name(irq,
&crime_level_interrupt,
handle_level_irq,
"level");
break;
case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ:
case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ:
case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ:
case CRIME_VICE_IRQ:
irq_set_chip_and_handler_name(irq,
&crime_edge_interrupt,
handle_edge_irq,
"edge");
break;
case MACEISA_PARALLEL_IRQ:
case MACEISA_SERIAL1_TDMAPR_IRQ:
case MACEISA_SERIAL2_TDMAPR_IRQ:
irq_set_chip_and_handler_name(irq,
&ip32_maceisa_edge_interrupt,
handle_edge_irq,
"edge");
break;
default:
irq_set_chip_and_handler_name(irq,
&ip32_maceisa_level_interrupt,
handle_level_irq,
"level");
break;
}
}
if (request_irq(CRIME_MEMERR_IRQ, crime_memerr_intr, 0,
"CRIME memory error", NULL))
pr_err("Failed to register CRIME memory error interrupt\n");
if (request_irq(CRIME_CPUERR_IRQ, crime_cpuerr_intr, 0,
"CRIME CPU error", NULL))
pr_err("Failed to register CRIME CPU error interrupt\n");
#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
change_c0_status(ST0_IM, ALLINTS);
}
| linux-master | arch/mips/sgi-ip32/ip32-irq.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Ralf Baechle ([email protected])
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/rtc/ds1685.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
extern void ip32_prepare_poweroff(void);
#define MACEISA_SERIAL1_OFFS offsetof(struct sgi_mace, isa.serial1)
#define MACEISA_SERIAL2_OFFS offsetof(struct sgi_mace, isa.serial2)
#define MACE_PORT(offset,_irq) \
{ \
.mapbase = MACE_BASE + offset, \
.irq = _irq, \
.uartclk = 1843200, \
.iotype = UPIO_MEM, \
.flags = UPF_SKIP_TEST|UPF_IOREMAP, \
.regshift = 8, \
}
static struct plat_serial8250_port uart8250_data[] = {
MACE_PORT(MACEISA_SERIAL1_OFFS, MACEISA_SERIAL1_IRQ),
MACE_PORT(MACEISA_SERIAL2_OFFS, MACEISA_SERIAL2_IRQ),
{ },
};
static struct platform_device uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = uart8250_data,
},
};
static int __init uart8250_init(void)
{
return platform_device_register(&uart8250_device);
}
device_initcall(uart8250_init);
static __init int meth_devinit(void)
{
struct platform_device *pd;
int ret;
pd = platform_device_alloc("meth", -1);
if (!pd)
return -ENOMEM;
ret = platform_device_add(pd);
if (ret)
platform_device_put(pd);
return ret;
}
device_initcall(meth_devinit);
static __init int sgio2audio_devinit(void)
{
struct platform_device *pd;
int ret;
pd = platform_device_alloc("sgio2audio", -1);
if (!pd)
return -ENOMEM;
ret = platform_device_add(pd);
if (ret)
platform_device_put(pd);
return ret;
}
device_initcall(sgio2audio_devinit);
static __init int sgio2btns_devinit(void)
{
return IS_ERR(platform_device_register_simple("sgibtns", -1, NULL, 0));
}
device_initcall(sgio2btns_devinit);
#define MACE_RTC_RES_START (MACE_BASE + offsetof(struct sgi_mace, isa.rtc))
#define MACE_RTC_RES_END (MACE_RTC_RES_START + 32767)
static struct resource ip32_rtc_resources[] = {
{
.start = MACEISA_RTC_IRQ,
.end = MACEISA_RTC_IRQ,
.flags = IORESOURCE_IRQ
}, {
.start = MACE_RTC_RES_START,
.end = MACE_RTC_RES_END,
.flags = IORESOURCE_MEM,
}
};
/* RTC registers on IP32 are each padded by 256 bytes (0x100). */
static struct ds1685_rtc_platform_data
ip32_rtc_platform_data[] = {
{
.regstep = 0x100,
.bcd_mode = true,
.no_irq = false,
.uie_unsupported = false,
.access_type = ds1685_reg_direct,
.plat_prepare_poweroff = ip32_prepare_poweroff,
},
};
struct platform_device ip32_rtc_device = {
.name = "rtc-ds1685",
.id = -1,
.dev = {
.platform_data = ip32_rtc_platform_data,
},
.num_resources = ARRAY_SIZE(ip32_rtc_resources),
.resource = ip32_rtc_resources,
};
static __init int sgio2_rtc_devinit(void)
{
return platform_device_register(&ip32_rtc_device);
}
device_initcall(sgio2_rtc_devinit);
| linux-master | arch/mips/sgi-ip32/ip32-platform.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 Keith M Wesolowski
* Copyright (C) 2001 Paul Mundt
* Copyright (C) 2003 Guido Guenther <[email protected]>
*/
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/rtc/ds1685.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <asm/addrspace.h>
#include <asm/irq.h>
#include <asm/reboot.h>
#include <asm/wbflush.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/crime.h>
#include <asm/ip32/ip32_ints.h>
#define POWERDOWN_TIMEOUT 120
/*
* Blink frequency during reboot grace period and when panicked.
*/
#define POWERDOWN_FREQ (HZ / 4)
#define PANIC_FREQ (HZ / 8)
extern struct platform_device ip32_rtc_device;
static struct timer_list power_timer, blink_timer;
static unsigned long blink_timer_timeout;
static int has_panicked, shutting_down;
static __noreturn void ip32_poweroff(void *data)
{
void (*poweroff_func)(struct platform_device *) =
symbol_get(ds1685_rtc_poweroff);
#ifdef CONFIG_MODULES
/* If the first __symbol_get failed, our module wasn't loaded. */
if (!poweroff_func) {
request_module("rtc-ds1685");
poweroff_func = symbol_get(ds1685_rtc_poweroff);
}
#endif
if (!poweroff_func)
pr_emerg("RTC not available for power-off. Spinning forever ...\n");
else {
(*poweroff_func)((struct platform_device *)data);
symbol_put(ds1685_rtc_poweroff);
}
unreachable();
}
static void ip32_machine_restart(char *cmd) __noreturn;
static void ip32_machine_restart(char *cmd)
{
msleep(20);
crime->control = CRIME_CONTROL_HARD_RESET;
unreachable();
}
static void blink_timeout(struct timer_list *unused)
{
unsigned long led = mace->perif.ctrl.misc ^ MACEISA_LED_RED;
mace->perif.ctrl.misc = led;
mod_timer(&blink_timer, jiffies + blink_timer_timeout);
}
static void ip32_machine_halt(void)
{
ip32_poweroff(&ip32_rtc_device);
}
static void power_timeout(struct timer_list *unused)
{
ip32_poweroff(&ip32_rtc_device);
}
void ip32_prepare_poweroff(void)
{
if (has_panicked)
return;
if (shutting_down || kill_cad_pid(SIGINT, 1)) {
/* No init process or button pressed twice. */
ip32_poweroff(&ip32_rtc_device);
}
shutting_down = 1;
blink_timer_timeout = POWERDOWN_FREQ;
blink_timeout(&blink_timer);
timer_setup(&power_timer, power_timeout, 0);
power_timer.expires = jiffies + POWERDOWN_TIMEOUT * HZ;
add_timer(&power_timer);
}
static int panic_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
unsigned long led;
if (has_panicked)
return NOTIFY_DONE;
has_panicked = 1;
/* turn off the green LED */
led = mace->perif.ctrl.misc | MACEISA_LED_GREEN;
mace->perif.ctrl.misc = led;
blink_timer_timeout = PANIC_FREQ;
blink_timeout(&blink_timer);
return NOTIFY_DONE;
}
static struct notifier_block panic_block = {
.notifier_call = panic_event,
};
static __init int ip32_reboot_setup(void)
{
/* turn on the green led only */
unsigned long led = mace->perif.ctrl.misc;
led |= MACEISA_LED_RED;
led &= ~MACEISA_LED_GREEN;
mace->perif.ctrl.misc = led;
_machine_restart = ip32_machine_restart;
_machine_halt = ip32_machine_halt;
pm_power_off = ip32_machine_halt;
timer_setup(&blink_timer, blink_timeout, 0);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
return 0;
}
subsys_initcall(ip32_reboot_setup);
| linux-master | arch/mips/sgi-ip32/ip32-reset.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Keith M Wesolowski
* Copyright (C) 2005 Ilya A. Volynets (Total Knowledge)
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/ip32/crime.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
extern void crime_init(void);
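/*
 * Worked example for the bank decode below (sketch): a bank control
 * word whose ADDR field decodes to 1 with SDRAM_SIZE set describes a
 * 128 MiB bank at physical 0x02000000 (1 << 25); any bank whose end
 * would exceed the first 256 MiB is rebased above CRIME_HI_MEM_BASE
 * before being handed to memblock_add().
 */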
void __init prom_meminit(void)
{
u64 base, size;
int bank;
crime_init();
for (bank=0; bank < CRIME_MAXBANKS; bank++) {
u64 bankctl = crime->bank_ctrl[bank];
base = (bankctl & CRIME_MEM_BANK_CONTROL_ADDR) << 25;
if (bank != 0 && base == 0)
continue;
size = (bankctl & CRIME_MEM_BANK_CONTROL_SDRAM_SIZE) ? 128 : 32;
size <<= 20;
if (base + size > (256 << 20))
base += CRIME_HI_MEM_BASE;
printk("CRIME MC: bank %u base 0x%016Lx size %LuMiB\n",
bank, base, size >> 20);
memblock_add(base, size);
}
}
| linux-master | arch/mips/sgi-ip32/ip32-memory.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <bcm63xx_io.h>
#include <linux/serial_bcm63xx.h>
#include <asm/setup.h>
static void wait_xfered(void)
{
unsigned int val;
/* wait for any previous char to be transmitted */
do {
val = bcm_uart0_readl(UART_IR_REG);
if (val & UART_IR_STAT(UART_IR_TXEMPTY))
break;
} while (1);
}
void prom_putchar(char c)
{
wait_xfered();
bcm_uart0_writel(c, UART_FIFO_REG);
wait_xfered();
}
| linux-master | arch/mips/bcm63xx/early_printk.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2012 Kevin Cernekee <[email protected]>
* Copyright (C) 2012 Broadcom Corporation
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_usb_usbd.h>
#define NUM_MMIO 2
#define NUM_IRQ 7
static struct resource usbd_resources[NUM_MMIO + NUM_IRQ];
static u64 usbd_dmamask = DMA_BIT_MASK(32);
static struct platform_device bcm63xx_usbd_device = {
.name = "bcm63xx_udc",
.id = -1,
.num_resources = ARRAY_SIZE(usbd_resources),
.resource = usbd_resources,
.dev = {
.dma_mask = &usbd_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
int __init bcm63xx_usbd_register(const struct bcm63xx_usbd_platform_data *pd)
{
const int irq_list[NUM_IRQ] = { IRQ_USBD,
IRQ_USBD_RXDMA0, IRQ_USBD_TXDMA0,
IRQ_USBD_RXDMA1, IRQ_USBD_TXDMA1,
IRQ_USBD_RXDMA2, IRQ_USBD_TXDMA2 };
int i;
if (!BCMCPU_IS_6328() && !BCMCPU_IS_6368())
return 0;
usbd_resources[0].start = bcm63xx_regset_address(RSET_USBD);
usbd_resources[0].end = usbd_resources[0].start + RSET_USBD_SIZE - 1;
usbd_resources[0].flags = IORESOURCE_MEM;
usbd_resources[1].start = bcm63xx_regset_address(RSET_USBDMA);
usbd_resources[1].end = usbd_resources[1].start + RSET_USBDMA_SIZE - 1;
usbd_resources[1].flags = IORESOURCE_MEM;
for (i = 0; i < NUM_IRQ; i++) {
struct resource *r = &usbd_resources[NUM_MMIO + i];
r->start = r->end = bcm63xx_get_irq_number(irq_list[i]);
r->flags = IORESOURCE_IRQ;
}
platform_device_add_data(&bcm63xx_usbd_device, pd, sizeof(*pd));
return platform_device_register(&bcm63xx_usbd_device);
}
| linux-master | arch/mips/bcm63xx/dev-usb-usbd.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_timer.h>
#include <bcm63xx_regs.h>
static DEFINE_RAW_SPINLOCK(timer_reg_lock);
static DEFINE_RAW_SPINLOCK(timer_data_lock);
static struct clk *periph_clk;
static struct timer_data {
void (*cb)(void *);
void *data;
} timer_data[BCM63XX_TIMER_COUNT];
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
u32 stat;
int i;
raw_spin_lock(&timer_reg_lock);
stat = bcm_timer_readl(TIMER_IRQSTAT_REG);
bcm_timer_writel(stat, TIMER_IRQSTAT_REG);
raw_spin_unlock(&timer_reg_lock);
for (i = 0; i < BCM63XX_TIMER_COUNT; i++) {
if (!(stat & TIMER_IRQSTAT_TIMER_CAUSE(i)))
continue;
raw_spin_lock(&timer_data_lock);
if (!timer_data[i].cb) {
raw_spin_unlock(&timer_data_lock);
continue;
}
timer_data[i].cb(timer_data[i].data);
raw_spin_unlock(&timer_data_lock);
}
return IRQ_HANDLED;
}
int bcm63xx_timer_enable(int id)
{
u32 reg;
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return -EINVAL;
raw_spin_lock_irqsave(&timer_reg_lock, flags);
reg = bcm_timer_readl(TIMER_CTLx_REG(id));
reg |= TIMER_CTL_ENABLE_MASK;
bcm_timer_writel(reg, TIMER_CTLx_REG(id));
reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
reg |= TIMER_IRQSTAT_TIMER_IR_EN(id);
bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_timer_enable);
int bcm63xx_timer_disable(int id)
{
u32 reg;
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return -EINVAL;
raw_spin_lock_irqsave(&timer_reg_lock, flags);
reg = bcm_timer_readl(TIMER_CTLx_REG(id));
reg &= ~TIMER_CTL_ENABLE_MASK;
bcm_timer_writel(reg, TIMER_CTLx_REG(id));
reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
reg &= ~TIMER_IRQSTAT_TIMER_IR_EN(id);
bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_timer_disable);
int bcm63xx_timer_register(int id, void (*callback)(void *data), void *data)
{
unsigned long flags;
int ret;
if (id >= BCM63XX_TIMER_COUNT || !callback)
return -EINVAL;
ret = 0;
raw_spin_lock_irqsave(&timer_data_lock, flags);
if (timer_data[id].cb) {
ret = -EBUSY;
goto out;
}
timer_data[id].cb = callback;
timer_data[id].data = data;
out:
raw_spin_unlock_irqrestore(&timer_data_lock, flags);
return ret;
}
EXPORT_SYMBOL(bcm63xx_timer_register);
void bcm63xx_timer_unregister(int id)
{
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return;
raw_spin_lock_irqsave(&timer_data_lock, flags);
timer_data[id].cb = NULL;
raw_spin_unlock_irqrestore(&timer_data_lock, flags);
}
EXPORT_SYMBOL(bcm63xx_timer_unregister);
unsigned int bcm63xx_timer_countdown(unsigned int countdown_us)
{
return (clk_get_rate(periph_clk) / (1000 * 1000)) * countdown_us;
}
EXPORT_SYMBOL(bcm63xx_timer_countdown);
int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us)
{
u32 reg, countdown;
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return -EINVAL;
countdown = bcm63xx_timer_countdown(countdown_us);
if (countdown & ~TIMER_CTL_COUNTDOWN_MASK)
return -EINVAL;
raw_spin_lock_irqsave(&timer_reg_lock, flags);
reg = bcm_timer_readl(TIMER_CTLx_REG(id));
if (monotonic)
reg &= ~TIMER_CTL_MONOTONIC_MASK;
else
reg |= TIMER_CTL_MONOTONIC_MASK;
reg &= ~TIMER_CTL_COUNTDOWN_MASK;
reg |= countdown;
bcm_timer_writel(reg, TIMER_CTLx_REG(id));
raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_timer_set);
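/*
 * Typical usage of the API above (an illustrative sketch; my_cb is a
 * hypothetical callback and error handling is omitted):
 *
 *	bcm63xx_timer_register(0, my_cb, NULL);
 *	bcm63xx_timer_set(0, 0, 500 * 1000);	-- fire after 500 ms
 *	bcm63xx_timer_enable(0);
 */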
int bcm63xx_timer_init(void)
{
int ret, irq;
u32 reg;
reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
reg &= ~TIMER_IRQSTAT_TIMER0_IR_EN;
reg &= ~TIMER_IRQSTAT_TIMER1_IR_EN;
reg &= ~TIMER_IRQSTAT_TIMER2_IR_EN;
bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
periph_clk = clk_get(NULL, "periph");
if (IS_ERR(periph_clk))
return -ENODEV;
irq = bcm63xx_get_irq_number(IRQ_TIMER);
ret = request_irq(irq, timer_interrupt, 0, "bcm63xx_timer", NULL);
if (ret) {
pr_err("%s: failed to register irq\n", __func__);
return ret;
}
return 0;
}
arch_initcall(bcm63xx_timer_init);
| linux-master | arch/mips/bcm63xx/timer.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <linux/platform_device.h>
#include <bcm63xx_cs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_pcmcia.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
static struct resource pcmcia_resources[] = {
/* pcmcia registers */
{
/* start & end filled at runtime */
.flags = IORESOURCE_MEM,
},
/* pcmcia memory zone resources */
{
.start = BCM_PCMCIA_COMMON_BASE_PA,
.end = BCM_PCMCIA_COMMON_END_PA,
.flags = IORESOURCE_MEM,
},
{
.start = BCM_PCMCIA_ATTR_BASE_PA,
.end = BCM_PCMCIA_ATTR_END_PA,
.flags = IORESOURCE_MEM,
},
{
.start = BCM_PCMCIA_IO_BASE_PA,
.end = BCM_PCMCIA_IO_END_PA,
.flags = IORESOURCE_MEM,
},
/* PCMCIA irq */
{
/* start filled at runtime */
.flags = IORESOURCE_IRQ,
},
/* declare PCMCIA IO resource also */
{
.start = BCM_PCMCIA_IO_BASE_PA,
.end = BCM_PCMCIA_IO_END_PA,
.flags = IORESOURCE_IO,
},
};
static struct bcm63xx_pcmcia_platform_data pd;
static struct platform_device bcm63xx_pcmcia_device = {
.name = "bcm63xx_pcmcia",
.id = 0,
.num_resources = ARRAY_SIZE(pcmcia_resources),
.resource = pcmcia_resources,
.dev = {
.platform_data = &pd,
},
};
static int __init config_pcmcia_cs(unsigned int cs,
u32 base, unsigned int size)
{
int ret;
ret = bcm63xx_set_cs_status(cs, 0);
if (!ret)
ret = bcm63xx_set_cs_base(cs, base, size);
if (!ret)
ret = bcm63xx_set_cs_status(cs, 1);
return ret;
}
static const struct {
unsigned int cs;
unsigned int base;
unsigned int size;
} pcmcia_cs[3] __initconst = {
{
.cs = MPI_CS_PCMCIA_COMMON,
.base = BCM_PCMCIA_COMMON_BASE_PA,
.size = BCM_PCMCIA_COMMON_SIZE
},
{
.cs = MPI_CS_PCMCIA_ATTR,
.base = BCM_PCMCIA_ATTR_BASE_PA,
.size = BCM_PCMCIA_ATTR_SIZE
},
{
.cs = MPI_CS_PCMCIA_IO,
.base = BCM_PCMCIA_IO_BASE_PA,
.size = BCM_PCMCIA_IO_SIZE
},
};
int __init bcm63xx_pcmcia_register(void)
{
int ret, i;
if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358())
return 0;
/* use correct pcmcia ready gpio depending on processor */
switch (bcm63xx_get_cpu_id()) {
case BCM6348_CPU_ID:
pd.ready_gpio = 22;
break;
case BCM6358_CPU_ID:
pd.ready_gpio = 18;
break;
default:
return -ENODEV;
}
pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA);
pcmcia_resources[0].end = pcmcia_resources[0].start +
RSET_PCMCIA_SIZE - 1;
pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA);
/* configure pcmcia chip selects */
for (i = 0; i < 3; i++) {
ret = config_pcmcia_cs(pcmcia_cs[i].cs,
pcmcia_cs[i].base,
pcmcia_cs[i].size);
if (ret)
goto out_err;
}
return platform_device_register(&bcm63xx_pcmcia_device);
out_err:
pr_err("unable to set pcmcia chip select\n");
return ret;
}
| linux-master | arch/mips/bcm63xx/dev-pcmcia.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2008 Nicolas Schichan <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>
static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
static inline u32 get_ext_irq_perf_reg(int irq)
{
if (irq < 4)
return ext_irq_cfg_reg1;
return ext_irq_cfg_reg2;
}
static inline void handle_internal(int intbit)
{
if (is_ext_irq_cascaded &&
intbit >= ext_irq_start && intbit <= ext_irq_end)
do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
else
do_IRQ(intbit + IRQ_INTERNAL_BASE);
}
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
const struct cpumask *m)
{
bool enable = cpu_online(cpu);
#ifdef CONFIG_SMP
if (m)
enable &= cpumask_test_cpu(cpu, m);
else if (irqd_affinity_was_set(d))
enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
return enable;
}
/*
* dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
* prioritize any interrupt relatively to another. the static counter
* will resume the loop where it ended the last time we left this
* function.
*/
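/*
 * Example of the rotation (sketch): with width == 32 and bits 3 and 17
 * pending, one dispatch call services bit 3 and leaves the static index
 * at 4; the next call resumes scanning from bit 4 and services bit 17,
 * so no source can starve another.
 */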
#define BUILD_IPIC_INTERNAL(width) \
void __dispatch_internal_##width(int cpu) \
{ \
u32 pending[width / 32]; \
unsigned int src, tgt; \
bool irqs_pending = false; \
static unsigned int i[2]; \
unsigned int *next = &i[cpu]; \
unsigned long flags; \
\
/* read registers in reverse order */ \
spin_lock_irqsave(&ipic_lock, flags); \
for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
u32 val; \
\
val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
pending[--tgt] = val; \
\
if (val) \
irqs_pending = true; \
} \
spin_unlock_irqrestore(&ipic_lock, flags); \
\
if (!irqs_pending) \
return; \
\
while (1) { \
unsigned int to_call = *next; \
\
*next = (*next + 1) & (width - 1); \
if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
handle_internal(to_call); \
break; \
} \
} \
} \
\
static void __internal_irq_mask_##width(struct irq_data *d) \
{ \
u32 val; \
unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
unsigned reg = (irq / 32) ^ (width/32 - 1); \
unsigned bit = irq & 0x1f; \
unsigned long flags; \
int cpu; \
\
spin_lock_irqsave(&ipic_lock, flags); \
for_each_present_cpu(cpu) { \
if (!irq_mask_addr[cpu]) \
break; \
\
val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
val &= ~(1 << bit); \
bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
} \
spin_unlock_irqrestore(&ipic_lock, flags); \
} \
\
static void __internal_irq_unmask_##width(struct irq_data *d, \
const struct cpumask *m) \
{ \
u32 val; \
unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
unsigned reg = (irq / 32) ^ (width/32 - 1); \
unsigned bit = irq & 0x1f; \
unsigned long flags; \
int cpu; \
\
spin_lock_irqsave(&ipic_lock, flags); \
for_each_present_cpu(cpu) { \
if (!irq_mask_addr[cpu]) \
break; \
\
val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
if (enable_irq_for_cpu(cpu, d, m)) \
val |= (1 << bit); \
else \
val &= ~(1 << bit); \
bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
} \
spin_unlock_irqrestore(&ipic_lock, flags); \
}
BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
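/*
 * main CPU interrupt dispatch. IP2 cascades CPU0's internal controller;
 * when external IRQs are cascaded through the internal controller, IP3
 * carries CPU1's internal interrupts, otherwise IP3-IP6 are the four
 * dedicated external IRQ lines.
 */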
asmlinkage void plat_irq_dispatch(void)
{
u32 cause;
do {
cause = read_c0_cause() & read_c0_status() & ST0_IM;
if (!cause)
break;
if (cause & CAUSEF_IP7)
do_IRQ(7);
if (cause & CAUSEF_IP0)
do_IRQ(0);
if (cause & CAUSEF_IP1)
do_IRQ(1);
if (cause & CAUSEF_IP2)
dispatch_internal(0);
if (is_ext_irq_cascaded) {
if (cause & CAUSEF_IP3)
dispatch_internal(1);
} else {
if (cause & CAUSEF_IP3)
do_IRQ(IRQ_EXT_0);
if (cause & CAUSEF_IP4)
do_IRQ(IRQ_EXT_1);
if (cause & CAUSEF_IP5)
do_IRQ(IRQ_EXT_2);
if (cause & CAUSEF_IP6)
do_IRQ(IRQ_EXT_3);
}
} while (1);
}
/*
 * internal IRQ operations: only mask/unmask bits in the PERF irq mask
* register.
*/
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
internal_irq_mask(d);
}
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
internal_irq_unmask(d, NULL);
}
/*
 * external IRQ operations: mask/unmask and clear via the PERF external
* irq control register.
*/
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
u32 reg, regaddr;
unsigned long flags;
regaddr = get_ext_irq_perf_reg(irq);
spin_lock_irqsave(&epic_lock, flags);
reg = bcm_perf_readl(regaddr);
if (BCMCPU_IS_6348())
reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
else
reg &= ~EXTIRQ_CFG_MASK(irq % 4);
bcm_perf_writel(reg, regaddr);
spin_unlock_irqrestore(&epic_lock, flags);
if (is_ext_irq_cascaded)
internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}
static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
u32 reg, regaddr;
unsigned long flags;
regaddr = get_ext_irq_perf_reg(irq);
spin_lock_irqsave(&epic_lock, flags);
reg = bcm_perf_readl(regaddr);
if (BCMCPU_IS_6348())
reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
else
reg |= EXTIRQ_CFG_MASK(irq % 4);
bcm_perf_writel(reg, regaddr);
spin_unlock_irqrestore(&epic_lock, flags);
if (is_ext_irq_cascaded)
internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
NULL);
}
static void bcm63xx_external_irq_clear(struct irq_data *d)
{
unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
u32 reg, regaddr;
unsigned long flags;
regaddr = get_ext_irq_perf_reg(irq);
spin_lock_irqsave(&epic_lock, flags);
reg = bcm_perf_readl(regaddr);
if (BCMCPU_IS_6348())
reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
else
reg |= EXTIRQ_CFG_CLEAR(irq % 4);
bcm_perf_writel(reg, regaddr);
spin_unlock_irqrestore(&epic_lock, flags);
}
static int bcm63xx_external_irq_set_type(struct irq_data *d,
unsigned int flow_type)
{
unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
u32 reg, regaddr;
int levelsense, sense, bothedge;
unsigned long flags;
flow_type &= IRQ_TYPE_SENSE_MASK;
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
levelsense = sense = bothedge = 0;
switch (flow_type) {
case IRQ_TYPE_EDGE_BOTH:
bothedge = 1;
break;
case IRQ_TYPE_EDGE_RISING:
sense = 1;
break;
case IRQ_TYPE_EDGE_FALLING:
break;
case IRQ_TYPE_LEVEL_HIGH:
levelsense = 1;
sense = 1;
break;
case IRQ_TYPE_LEVEL_LOW:
levelsense = 1;
break;
default:
		pr_err("bogus flow type combination given!\n");
return -EINVAL;
}
regaddr = get_ext_irq_perf_reg(irq);
spin_lock_irqsave(&epic_lock, flags);
reg = bcm_perf_readl(regaddr);
irq %= 4;
switch (bcm63xx_get_cpu_id()) {
case BCM6348_CPU_ID:
if (levelsense)
reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
else
reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
if (sense)
reg |= EXTIRQ_CFG_SENSE_6348(irq);
else
reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
if (bothedge)
reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
else
reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
break;
case BCM3368_CPU_ID:
case BCM6328_CPU_ID:
case BCM6338_CPU_ID:
case BCM6345_CPU_ID:
case BCM6358_CPU_ID:
case BCM6362_CPU_ID:
case BCM6368_CPU_ID:
if (levelsense)
reg |= EXTIRQ_CFG_LEVELSENSE(irq);
else
reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
if (sense)
reg |= EXTIRQ_CFG_SENSE(irq);
else
reg &= ~EXTIRQ_CFG_SENSE(irq);
if (bothedge)
reg |= EXTIRQ_CFG_BOTHEDGE(irq);
else
reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
break;
default:
BUG();
}
bcm_perf_writel(reg, regaddr);
spin_unlock_irqrestore(&epic_lock, flags);
irqd_set_trigger_type(d, flow_type);
if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
else
irq_set_handler_locked(d, handle_edge_irq);
return IRQ_SET_MASK_OK_NOCOPY;
}
#ifdef CONFIG_SMP
static int bcm63xx_internal_set_affinity(struct irq_data *data,
const struct cpumask *dest,
bool force)
{
if (!irqd_irq_disabled(data))
internal_irq_unmask(data, dest);
return 0;
}
#endif
static struct irq_chip bcm63xx_internal_irq_chip = {
.name = "bcm63xx_ipic",
.irq_mask = bcm63xx_internal_irq_mask,
.irq_unmask = bcm63xx_internal_irq_unmask,
};
static struct irq_chip bcm63xx_external_irq_chip = {
.name = "bcm63xx_epic",
.irq_ack = bcm63xx_external_irq_clear,
.irq_mask = bcm63xx_external_irq_mask,
.irq_unmask = bcm63xx_external_irq_unmask,
.irq_set_type = bcm63xx_external_irq_set_type,
};
static void bcm63xx_init_irq(void)
{
int irq_bits;
irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
switch (bcm63xx_get_cpu_id()) {
case BCM3368_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
break;
case BCM6328_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
irq_bits = 64;
ext_irq_count = 4;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
break;
case BCM6338_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
break;
case BCM6345_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
break;
case BCM6348_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
irq_stat_addr[1] = 0;
irq_mask_addr[1] = 0;
irq_bits = 32;
ext_irq_count = 4;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
break;
case BCM6358_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
irq_bits = 32;
ext_irq_count = 4;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
break;
case BCM6362_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
irq_bits = 64;
ext_irq_count = 4;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
break;
case BCM6368_CPU_ID:
irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
irq_bits = 64;
ext_irq_count = 6;
is_ext_irq_cascaded = 1;
ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
break;
default:
BUG();
}
if (irq_bits == 32) {
dispatch_internal = __dispatch_internal_32;
internal_irq_mask = __internal_irq_mask_32;
internal_irq_unmask = __internal_irq_unmask_32;
} else {
dispatch_internal = __dispatch_internal_64;
internal_irq_mask = __internal_irq_mask_64;
internal_irq_unmask = __internal_irq_unmask_64;
}
}
void __init arch_init_irq(void)
{
int i, irq;
bcm63xx_init_irq();
mips_cpu_irq_init();
for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
handle_level_irq);
for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
handle_edge_irq);
if (!is_ext_irq_cascaded) {
for (i = 3; i < 3 + ext_irq_count; ++i) {
irq = MIPS_CPU_IRQ_BASE + i;
if (request_irq(irq, no_action, IRQF_NO_THREAD,
"cascade_extirq", NULL)) {
pr_err("Failed to request irq %d (cascade_extirq)\n",
irq);
}
}
}
irq = MIPS_CPU_IRQ_BASE + 2;
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
#ifdef CONFIG_SMP
if (is_ext_irq_cascaded) {
irq = MIPS_CPU_IRQ_BASE + 3;
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
NULL))
pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
bcm63xx_internal_irq_chip.irq_set_affinity =
bcm63xx_internal_set_affinity;
cpumask_clear(irq_default_affinity);
cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
}
#endif
}
| linux-master | arch/mips/bcm63xx/irq.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2009 Florian Fainelli <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>
const unsigned long *bcm63xx_regs_base;
EXPORT_SYMBOL(bcm63xx_regs_base);
const int *bcm63xx_irqs;
EXPORT_SYMBOL(bcm63xx_irqs);
u16 bcm63xx_cpu_id __read_mostly;
EXPORT_SYMBOL(bcm63xx_cpu_id);
static u8 bcm63xx_cpu_rev;
static unsigned int bcm63xx_cpu_freq;
static unsigned int bcm63xx_memory_size;
static const unsigned long bcm3368_regs_base[] = {
__GEN_CPU_REGS_TABLE(3368)
};
static const int bcm3368_irqs[] = {
__GEN_CPU_IRQ_TABLE(3368)
};
static const unsigned long bcm6328_regs_base[] = {
__GEN_CPU_REGS_TABLE(6328)
};
static const int bcm6328_irqs[] = {
__GEN_CPU_IRQ_TABLE(6328)
};
static const unsigned long bcm6338_regs_base[] = {
__GEN_CPU_REGS_TABLE(6338)
};
static const int bcm6338_irqs[] = {
__GEN_CPU_IRQ_TABLE(6338)
};
static const unsigned long bcm6345_regs_base[] = {
__GEN_CPU_REGS_TABLE(6345)
};
static const int bcm6345_irqs[] = {
__GEN_CPU_IRQ_TABLE(6345)
};
static const unsigned long bcm6348_regs_base[] = {
__GEN_CPU_REGS_TABLE(6348)
};
static const int bcm6348_irqs[] = {
__GEN_CPU_IRQ_TABLE(6348)
};
static const unsigned long bcm6358_regs_base[] = {
__GEN_CPU_REGS_TABLE(6358)
};
static const int bcm6358_irqs[] = {
__GEN_CPU_IRQ_TABLE(6358)
};
static const unsigned long bcm6362_regs_base[] = {
__GEN_CPU_REGS_TABLE(6362)
};
static const int bcm6362_irqs[] = {
__GEN_CPU_IRQ_TABLE(6362)
};
static const unsigned long bcm6368_regs_base[] = {
__GEN_CPU_REGS_TABLE(6368)
};
static const int bcm6368_irqs[] = {
__GEN_CPU_IRQ_TABLE(6368)
};
u8 bcm63xx_get_cpu_rev(void)
{
return bcm63xx_cpu_rev;
}
EXPORT_SYMBOL(bcm63xx_get_cpu_rev);
unsigned int bcm63xx_get_cpu_freq(void)
{
return bcm63xx_cpu_freq;
}
unsigned int bcm63xx_get_memory_size(void)
{
return bcm63xx_memory_size;
}
static unsigned int detect_cpu_clock(void)
{
u16 cpu_id = bcm63xx_get_cpu_id();
switch (cpu_id) {
case BCM3368_CPU_ID:
return 300000000;
case BCM6328_CPU_ID:
{
unsigned int tmp, mips_pll_fcvo;
tmp = bcm_misc_readl(MISC_STRAPBUS_6328_REG);
mips_pll_fcvo = (tmp & STRAPBUS_6328_FCVO_MASK)
>> STRAPBUS_6328_FCVO_SHIFT;
switch (mips_pll_fcvo) {
case 0x12:
case 0x14:
case 0x19:
return 160000000;
case 0x1c:
return 192000000;
case 0x13:
case 0x15:
return 200000000;
case 0x1a:
return 384000000;
case 0x16:
return 400000000;
default:
return 320000000;
}
}
case BCM6338_CPU_ID:
		/* BCM6338 has a fixed 240 MHz frequency */
return 240000000;
case BCM6345_CPU_ID:
		/* BCM6345 has a fixed 140 MHz frequency */
return 140000000;
case BCM6348_CPU_ID:
{
unsigned int tmp, n1, n2, m1;
/* 16MHz * (N1 + 1) * (N2 + 2) / (M1_CPU + 1) */
tmp = bcm_perf_readl(PERF_MIPSPLLCTL_REG);
n1 = (tmp & MIPSPLLCTL_N1_MASK) >> MIPSPLLCTL_N1_SHIFT;
n2 = (tmp & MIPSPLLCTL_N2_MASK) >> MIPSPLLCTL_N2_SHIFT;
m1 = (tmp & MIPSPLLCTL_M1CPU_MASK) >> MIPSPLLCTL_M1CPU_SHIFT;
n1 += 1;
n2 += 2;
m1 += 1;
return (16 * 1000000 * n1 * n2) / m1;
}
case BCM6358_CPU_ID:
{
unsigned int tmp, n1, n2, m1;
/* 16MHz * N1 * N2 / M1_CPU */
tmp = bcm_ddr_readl(DDR_DMIPSPLLCFG_REG);
n1 = (tmp & DMIPSPLLCFG_N1_MASK) >> DMIPSPLLCFG_N1_SHIFT;
n2 = (tmp & DMIPSPLLCFG_N2_MASK) >> DMIPSPLLCFG_N2_SHIFT;
m1 = (tmp & DMIPSPLLCFG_M1_MASK) >> DMIPSPLLCFG_M1_SHIFT;
return (16 * 1000000 * n1 * n2) / m1;
}
case BCM6362_CPU_ID:
{
unsigned int tmp, mips_pll_fcvo;
tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
>> STRAPBUS_6362_FCVO_SHIFT;
switch (mips_pll_fcvo) {
case 0x03:
case 0x0b:
case 0x13:
case 0x1b:
return 240000000;
case 0x04:
case 0x0c:
case 0x14:
case 0x1c:
return 160000000;
case 0x05:
case 0x0e:
case 0x16:
case 0x1e:
case 0x1f:
return 400000000;
case 0x06:
return 440000000;
case 0x07:
case 0x17:
return 384000000;
case 0x15:
case 0x1d:
return 200000000;
default:
return 320000000;
}
}
case BCM6368_CPU_ID:
{
unsigned int tmp, p1, p2, ndiv, m1;
/* (64MHz / P1) * P2 * NDIV / M1_CPU */
tmp = bcm_ddr_readl(DDR_DMIPSPLLCFG_6368_REG);
p1 = (tmp & DMIPSPLLCFG_6368_P1_MASK) >>
DMIPSPLLCFG_6368_P1_SHIFT;
p2 = (tmp & DMIPSPLLCFG_6368_P2_MASK) >>
DMIPSPLLCFG_6368_P2_SHIFT;
ndiv = (tmp & DMIPSPLLCFG_6368_NDIV_MASK) >>
DMIPSPLLCFG_6368_NDIV_SHIFT;
tmp = bcm_ddr_readl(DDR_DMIPSPLLDIV_6368_REG);
m1 = (tmp & DMIPSPLLDIV_6368_MDIV_MASK) >>
DMIPSPLLDIV_6368_MDIV_SHIFT;
return (((64 * 1000000) / p1) * p2 * ndiv) / m1;
}
default:
panic("Failed to detect clock for CPU with id=%04X\n", cpu_id);
}
}
/*
* attempt to detect the amount of memory installed
*/
static unsigned int detect_memory_size(void)
{
unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
u32 val;
if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
return bcm_ddr_readl(DDR_CSEND_REG) << 24;
if (BCMCPU_IS_6345()) {
val = bcm_sdram_readl(SDRAM_MBASE_REG);
return val * 8 * 1024 * 1024;
}
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
val = bcm_sdram_readl(SDRAM_CFG_REG);
rows = (val & SDRAM_CFG_ROW_MASK) >> SDRAM_CFG_ROW_SHIFT;
cols = (val & SDRAM_CFG_COL_MASK) >> SDRAM_CFG_COL_SHIFT;
is_32bits = (val & SDRAM_CFG_32B_MASK) ? 1 : 0;
banks = (val & SDRAM_CFG_BANK_MASK) ? 2 : 1;
}
if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
val = bcm_memc_readl(MEMC_CFG_REG);
rows = (val & MEMC_CFG_ROW_MASK) >> MEMC_CFG_ROW_SHIFT;
cols = (val & MEMC_CFG_COL_MASK) >> MEMC_CFG_COL_SHIFT;
is_32bits = (val & MEMC_CFG_32B_MASK) ? 0 : 1;
banks = 2;
}
/* 0 => 11 address bits ... 2 => 13 address bits */
rows += 11;
/* 0 => 8 address bits ... 2 => 10 address bits */
cols += 8;
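	/* size = 2^rows * 2^cols * bus width in bytes * 2^banks */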
return 1 << (cols + rows + (is_32bits + 1) + banks);
}
void __init bcm63xx_cpu_init(void)
{
unsigned int tmp;
unsigned int cpu = smp_processor_id();
u32 chipid_reg;
/* soc registers location depends on cpu type */
chipid_reg = 0;
switch (current_cpu_type()) {
case CPU_BMIPS3300:
if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT)
__cpu_name[cpu] = "Broadcom BCM6338";
fallthrough;
case CPU_BMIPS32:
chipid_reg = BCM_6345_PERF_BASE;
break;
case CPU_BMIPS4350:
switch ((read_c0_prid() & PRID_REV_MASK)) {
case 0x04:
chipid_reg = BCM_3368_PERF_BASE;
break;
case 0x10:
chipid_reg = BCM_6345_PERF_BASE;
break;
default:
chipid_reg = BCM_6368_PERF_BASE;
break;
}
break;
}
/*
	 * it is really early to panic, but delaying the panic would not
	 * help since we would never get a working console anyway
*/
if (!chipid_reg)
panic("unsupported Broadcom CPU");
/* read out CPU type */
tmp = bcm_readl(chipid_reg);
bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
switch (bcm63xx_cpu_id) {
case BCM3368_CPU_ID:
bcm63xx_regs_base = bcm3368_regs_base;
bcm63xx_irqs = bcm3368_irqs;
break;
case BCM6328_CPU_ID:
bcm63xx_regs_base = bcm6328_regs_base;
bcm63xx_irqs = bcm6328_irqs;
break;
case BCM6338_CPU_ID:
bcm63xx_regs_base = bcm6338_regs_base;
bcm63xx_irqs = bcm6338_irqs;
break;
case BCM6345_CPU_ID:
bcm63xx_regs_base = bcm6345_regs_base;
bcm63xx_irqs = bcm6345_irqs;
break;
case BCM6348_CPU_ID:
bcm63xx_regs_base = bcm6348_regs_base;
bcm63xx_irqs = bcm6348_irqs;
break;
case BCM6358_CPU_ID:
bcm63xx_regs_base = bcm6358_regs_base;
bcm63xx_irqs = bcm6358_irqs;
break;
case BCM6362_CPU_ID:
bcm63xx_regs_base = bcm6362_regs_base;
bcm63xx_irqs = bcm6362_irqs;
break;
case BCM6368_CPU_ID:
bcm63xx_regs_base = bcm6368_regs_base;
bcm63xx_irqs = bcm6368_irqs;
break;
default:
		panic("unsupported Broadcom CPU %x", bcm63xx_cpu_id);
break;
}
bcm63xx_cpu_freq = detect_cpu_clock();
bcm63xx_memory_size = detect_memory_size();
pr_info("Detected Broadcom 0x%04x CPU revision %02x\n",
bcm63xx_cpu_id, bcm63xx_cpu_rev);
pr_info("CPU frequency is %u MHz\n",
bcm63xx_cpu_freq / 1000000);
pr_info("%uMB of RAM installed\n",
bcm63xx_memory_size >> 20);
}
| linux-master | arch/mips/bcm63xx/cpu.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 Florian Fainelli <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <bcm63xx_cpu.h>
static struct resource rng_resources[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
};
static struct platform_device bcm63xx_rng_device = {
.name = "bcm63xx-rng",
.id = -1,
.num_resources = ARRAY_SIZE(rng_resources),
.resource = rng_resources,
};
int __init bcm63xx_rng_register(void)
{
if (!BCMCPU_IS_6368())
return -ENODEV;
rng_resources[0].start = bcm63xx_regset_address(RSET_RNG);
rng_resources[0].end = rng_resources[0].start;
rng_resources[0].end += RSET_RNG_SIZE - 1;
return platform_device_register(&bcm63xx_rng_device);
}
arch_initcall(bcm63xx_rng_register);
| linux-master | arch/mips/bcm63xx/dev-rng.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2008-2011 Florian Fainelli <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/gpio/driver.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_gpio.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
static u32 gpio_out_low_reg;
static void bcm63xx_gpio_out_low_reg_init(void)
{
switch (bcm63xx_get_cpu_id()) {
case BCM6345_CPU_ID:
gpio_out_low_reg = GPIO_DATA_LO_REG_6345;
break;
default:
gpio_out_low_reg = GPIO_DATA_LO_REG;
break;
}
}
static DEFINE_SPINLOCK(bcm63xx_gpio_lock);
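/* software shadows of the GPIO output registers, protected by the lock above */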
static u32 gpio_out_low, gpio_out_high;
static void bcm63xx_gpio_set(struct gpio_chip *chip,
unsigned gpio, int val)
{
u32 reg;
u32 mask;
u32 *v;
unsigned long flags;
BUG_ON(gpio >= chip->ngpio);
if (gpio < 32) {
reg = gpio_out_low_reg;
mask = 1 << gpio;
v = &gpio_out_low;
} else {
reg = GPIO_DATA_HI_REG;
mask = 1 << (gpio - 32);
v = &gpio_out_high;
}
spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
if (val)
*v |= mask;
else
*v &= ~mask;
bcm_gpio_writel(*v, reg);
spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);
}
static int bcm63xx_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
u32 reg;
u32 mask;
BUG_ON(gpio >= chip->ngpio);
if (gpio < 32) {
reg = gpio_out_low_reg;
mask = 1 << gpio;
} else {
reg = GPIO_DATA_HI_REG;
mask = 1 << (gpio - 32);
}
return !!(bcm_gpio_readl(reg) & mask);
}
static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
unsigned gpio, int dir)
{
u32 reg;
u32 mask;
u32 tmp;
unsigned long flags;
BUG_ON(gpio >= chip->ngpio);
if (gpio < 32) {
reg = GPIO_CTL_LO_REG;
mask = 1 << gpio;
} else {
reg = GPIO_CTL_HI_REG;
mask = 1 << (gpio - 32);
}
spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
tmp = bcm_gpio_readl(reg);
if (dir == BCM63XX_GPIO_DIR_IN)
tmp &= ~mask;
else
tmp |= mask;
bcm_gpio_writel(tmp, reg);
spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);
return 0;
}
static int bcm63xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_IN);
}
static int bcm63xx_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
bcm63xx_gpio_set(chip, gpio, value);
return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_OUT);
}
static struct gpio_chip bcm63xx_gpio_chip = {
.label = "bcm63xx-gpio",
.direction_input = bcm63xx_gpio_direction_input,
.direction_output = bcm63xx_gpio_direction_output,
.get = bcm63xx_gpio_get,
.set = bcm63xx_gpio_set,
.base = 0,
};
int __init bcm63xx_gpio_init(void)
{
bcm63xx_gpio_out_low_reg_init();
gpio_out_low = bcm_gpio_readl(gpio_out_low_reg);
if (!BCMCPU_IS_6345())
gpio_out_high = bcm_gpio_readl(GPIO_DATA_HI_REG);
bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count();
pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio);
return gpiochip_add_data(&bcm63xx_gpio_chip, NULL);
}
| linux-master | arch/mips/bcm63xx/gpio.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_cs.h>
static DEFINE_SPINLOCK(bcm63xx_cs_lock);
/*
 * check whether the given chip select exists
*/
static int is_valid_cs(unsigned int cs)
{
if (cs > 6)
return 0;
return 1;
}
/*
* Configure chipselect base address and size (bytes).
* Size must be a power of two between 8k and 256M.
*/
int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size)
{
unsigned long flags;
u32 val;
if (!is_valid_cs(cs))
return -EINVAL;
/* sanity check on size */
if (size != roundup_pow_of_two(size))
return -EINVAL;
if (size < 8 * 1024 || size > 256 * 1024 * 1024)
return -EINVAL;
val = (base & MPI_CSBASE_BASE_MASK);
/* 8k => 0 - 256M => 15 */
val |= (ilog2(size) - ilog2(8 * 1024)) << MPI_CSBASE_SIZE_SHIFT;
spin_lock_irqsave(&bcm63xx_cs_lock, flags);
bcm_mpi_writel(val, MPI_CSBASE_REG(cs));
spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_set_cs_base);
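/*
 * Usage sketch (hypothetical caller, not part of this file): map a
 * 32 MiB device on chip select 0 at 0x1e000000, then enable it:
 *
 *	bcm63xx_set_cs_base(0, 0x1e000000, 32 << 20);
 *	bcm63xx_set_cs_status(0, 1);
 */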
/*
* configure chipselect timing (ns)
*/
int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait,
unsigned int setup, unsigned int hold)
{
unsigned long flags;
u32 val;
if (!is_valid_cs(cs))
return -EINVAL;
spin_lock_irqsave(&bcm63xx_cs_lock, flags);
val = bcm_mpi_readl(MPI_CSCTL_REG(cs));
val &= ~(MPI_CSCTL_WAIT_MASK);
val &= ~(MPI_CSCTL_SETUP_MASK);
val &= ~(MPI_CSCTL_HOLD_MASK);
val |= wait << MPI_CSCTL_WAIT_SHIFT;
val |= setup << MPI_CSCTL_SETUP_SHIFT;
val |= hold << MPI_CSCTL_HOLD_SHIFT;
bcm_mpi_writel(val, MPI_CSCTL_REG(cs));
spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_set_cs_timing);
/*
* configure other chipselect parameter (data bus size, ...)
*/
int bcm63xx_set_cs_param(unsigned int cs, u32 params)
{
unsigned long flags;
u32 val;
if (!is_valid_cs(cs))
return -EINVAL;
	/* none of these fields apply to pcmcia */
if (cs == MPI_CS_PCMCIA_COMMON ||
cs == MPI_CS_PCMCIA_ATTR ||
cs == MPI_CS_PCMCIA_IO)
return -EINVAL;
spin_lock_irqsave(&bcm63xx_cs_lock, flags);
val = bcm_mpi_readl(MPI_CSCTL_REG(cs));
val &= ~(MPI_CSCTL_DATA16_MASK);
val &= ~(MPI_CSCTL_SYNCMODE_MASK);
val &= ~(MPI_CSCTL_TSIZE_MASK);
val &= ~(MPI_CSCTL_ENDIANSWAP_MASK);
val |= params;
bcm_mpi_writel(val, MPI_CSCTL_REG(cs));
spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_set_cs_param);
/*
* set cs status (enable/disable)
*/
int bcm63xx_set_cs_status(unsigned int cs, int enable)
{
unsigned long flags;
u32 val;
if (!is_valid_cs(cs))
return -EINVAL;
spin_lock_irqsave(&bcm63xx_cs_lock, flags);
val = bcm_mpi_readl(MPI_CSCTL_REG(cs));
if (enable)
val |= MPI_CSCTL_ENABLE_MASK;
else
val &= ~MPI_CSCTL_ENABLE_MASK;
bcm_mpi_writel(val, MPI_CSCTL_REG(cs));
spin_unlock_irqrestore(&bcm63xx_cs_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_set_cs_status);
| linux-master | arch/mips/bcm63xx/cs.c |
/*
* Broadcom BCM63xx flash registration
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2008 Florian Fainelli <[email protected]>
* Copyright (C) 2012 Jonas Gorski <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_flash.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
static struct mtd_partition mtd_partitions[] = {
{
.name = "cfe",
.offset = 0x0,
.size = 0x40000,
}
};
static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };
static struct physmap_flash_data flash_data = {
.width = 2,
.parts = mtd_partitions,
.part_probe_types = bcm63xx_part_types,
};
static struct resource mtd_resources[] = {
{
.start = 0, /* filled at runtime */
.end = 0, /* filled at runtime */
.flags = IORESOURCE_MEM,
}
};
static struct platform_device mtd_dev = {
.name = "physmap-flash",
.resource = mtd_resources,
.num_resources = ARRAY_SIZE(mtd_resources),
.dev = {
.platform_data = &flash_data,
},
};
static int __init bcm63xx_detect_flash_type(void)
{
u32 val;
switch (bcm63xx_get_cpu_id()) {
case BCM6328_CPU_ID:
val = bcm_misc_readl(MISC_STRAPBUS_6328_REG);
if (val & STRAPBUS_6328_BOOT_SEL_SERIAL)
return BCM63XX_FLASH_TYPE_SERIAL;
else
return BCM63XX_FLASH_TYPE_NAND;
case BCM6338_CPU_ID:
case BCM6345_CPU_ID:
case BCM6348_CPU_ID:
		/* no way to auto-detect, so assume parallel */
return BCM63XX_FLASH_TYPE_PARALLEL;
case BCM3368_CPU_ID:
case BCM6358_CPU_ID:
val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
if (val & STRAPBUS_6358_BOOT_SEL_PARALLEL)
return BCM63XX_FLASH_TYPE_PARALLEL;
else
return BCM63XX_FLASH_TYPE_SERIAL;
case BCM6362_CPU_ID:
val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
return BCM63XX_FLASH_TYPE_SERIAL;
else
return BCM63XX_FLASH_TYPE_NAND;
case BCM6368_CPU_ID:
val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
case STRAPBUS_6368_BOOT_SEL_NAND:
return BCM63XX_FLASH_TYPE_NAND;
case STRAPBUS_6368_BOOT_SEL_SERIAL:
return BCM63XX_FLASH_TYPE_SERIAL;
case STRAPBUS_6368_BOOT_SEL_PARALLEL:
return BCM63XX_FLASH_TYPE_PARALLEL;
}
fallthrough;
default:
return -EINVAL;
}
}
int __init bcm63xx_flash_register(void)
{
int flash_type;
u32 val;
flash_type = bcm63xx_detect_flash_type();
switch (flash_type) {
case BCM63XX_FLASH_TYPE_PARALLEL:
/* read base address of boot chip select (0) */
val = bcm_mpi_readl(MPI_CSBASE_REG(0));
val &= MPI_CSBASE_BASE_MASK;
mtd_resources[0].start = val;
mtd_resources[0].end = 0x1FFFFFFF;
return platform_device_register(&mtd_dev);
case BCM63XX_FLASH_TYPE_SERIAL:
pr_warn("unsupported serial flash detected\n");
return -ENODEV;
case BCM63XX_FLASH_TYPE_NAND:
pr_warn("unsupported NAND flash detected\n");
return -ENODEV;
default:
pr_err("flash detection failed for BCM%x: %d\n",
bcm63xx_get_cpu_id(), flash_type);
return -ENODEV;
}
}
| linux-master | arch/mips/bcm63xx/dev-flash.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Jonas Gorski <[email protected]>
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_reset.h>
#define __GEN_RESET_BITS_TABLE(__cpu) \
[BCM63XX_RESET_SPI] = BCM## __cpu ##_RESET_SPI, \
[BCM63XX_RESET_ENET] = BCM## __cpu ##_RESET_ENET, \
[BCM63XX_RESET_USBH] = BCM## __cpu ##_RESET_USBH, \
[BCM63XX_RESET_USBD] = BCM## __cpu ##_RESET_USBD, \
[BCM63XX_RESET_DSL] = BCM## __cpu ##_RESET_DSL, \
[BCM63XX_RESET_SAR] = BCM## __cpu ##_RESET_SAR, \
[BCM63XX_RESET_EPHY] = BCM## __cpu ##_RESET_EPHY, \
[BCM63XX_RESET_ENETSW] = BCM## __cpu ##_RESET_ENETSW, \
[BCM63XX_RESET_PCM] = BCM## __cpu ##_RESET_PCM, \
[BCM63XX_RESET_MPI] = BCM## __cpu ##_RESET_MPI, \
[BCM63XX_RESET_PCIE] = BCM## __cpu ##_RESET_PCIE, \
[BCM63XX_RESET_PCIE_EXT] = BCM## __cpu ##_RESET_PCIE_EXT,
#define BCM3368_RESET_SPI SOFTRESET_3368_SPI_MASK
#define BCM3368_RESET_ENET SOFTRESET_3368_ENET_MASK
#define BCM3368_RESET_USBH 0
#define BCM3368_RESET_USBD SOFTRESET_3368_USBS_MASK
#define BCM3368_RESET_DSL 0
#define BCM3368_RESET_SAR 0
#define BCM3368_RESET_EPHY SOFTRESET_3368_EPHY_MASK
#define BCM3368_RESET_ENETSW 0
#define BCM3368_RESET_PCM SOFTRESET_3368_PCM_MASK
#define BCM3368_RESET_MPI SOFTRESET_3368_MPI_MASK
#define BCM3368_RESET_PCIE 0
#define BCM3368_RESET_PCIE_EXT 0
#define BCM6328_RESET_SPI SOFTRESET_6328_SPI_MASK
#define BCM6328_RESET_ENET 0
#define BCM6328_RESET_USBH SOFTRESET_6328_USBH_MASK
#define BCM6328_RESET_USBD SOFTRESET_6328_USBS_MASK
#define BCM6328_RESET_DSL 0
#define BCM6328_RESET_SAR SOFTRESET_6328_SAR_MASK
#define BCM6328_RESET_EPHY SOFTRESET_6328_EPHY_MASK
#define BCM6328_RESET_ENETSW SOFTRESET_6328_ENETSW_MASK
#define BCM6328_RESET_PCM SOFTRESET_6328_PCM_MASK
#define BCM6328_RESET_MPI 0
#define BCM6328_RESET_PCIE \
(SOFTRESET_6328_PCIE_MASK | \
SOFTRESET_6328_PCIE_CORE_MASK | \
SOFTRESET_6328_PCIE_HARD_MASK)
#define BCM6328_RESET_PCIE_EXT SOFTRESET_6328_PCIE_EXT_MASK
#define BCM6338_RESET_SPI SOFTRESET_6338_SPI_MASK
#define BCM6338_RESET_ENET SOFTRESET_6338_ENET_MASK
#define BCM6338_RESET_USBH SOFTRESET_6338_USBH_MASK
#define BCM6338_RESET_USBD SOFTRESET_6338_USBS_MASK
#define BCM6338_RESET_DSL SOFTRESET_6338_ADSL_MASK
#define BCM6338_RESET_SAR SOFTRESET_6338_SAR_MASK
#define BCM6338_RESET_EPHY 0
#define BCM6338_RESET_ENETSW 0
#define BCM6338_RESET_PCM 0
#define BCM6338_RESET_MPI 0
#define BCM6338_RESET_PCIE 0
#define BCM6338_RESET_PCIE_EXT 0
#define BCM6348_RESET_SPI SOFTRESET_6348_SPI_MASK
#define BCM6348_RESET_ENET SOFTRESET_6348_ENET_MASK
#define BCM6348_RESET_USBH SOFTRESET_6348_USBH_MASK
#define BCM6348_RESET_USBD SOFTRESET_6348_USBS_MASK
#define BCM6348_RESET_DSL SOFTRESET_6348_ADSL_MASK
#define BCM6348_RESET_SAR SOFTRESET_6348_SAR_MASK
#define BCM6348_RESET_EPHY 0
#define BCM6348_RESET_ENETSW 0
#define BCM6348_RESET_PCM 0
#define BCM6348_RESET_MPI 0
#define BCM6348_RESET_PCIE 0
#define BCM6348_RESET_PCIE_EXT 0
#define BCM6358_RESET_SPI SOFTRESET_6358_SPI_MASK
#define BCM6358_RESET_ENET SOFTRESET_6358_ENET_MASK
#define BCM6358_RESET_USBH SOFTRESET_6358_USBH_MASK
#define BCM6358_RESET_USBD 0
#define BCM6358_RESET_DSL SOFTRESET_6358_ADSL_MASK
#define BCM6358_RESET_SAR SOFTRESET_6358_SAR_MASK
#define BCM6358_RESET_EPHY SOFTRESET_6358_EPHY_MASK
#define BCM6358_RESET_ENETSW 0
#define BCM6358_RESET_PCM SOFTRESET_6358_PCM_MASK
#define BCM6358_RESET_MPI SOFTRESET_6358_MPI_MASK
#define BCM6358_RESET_PCIE 0
#define BCM6358_RESET_PCIE_EXT 0
#define BCM6362_RESET_SPI SOFTRESET_6362_SPI_MASK
#define BCM6362_RESET_ENET 0
#define BCM6362_RESET_USBH SOFTRESET_6362_USBH_MASK
#define BCM6362_RESET_USBD SOFTRESET_6362_USBS_MASK
#define BCM6362_RESET_DSL 0
#define BCM6362_RESET_SAR SOFTRESET_6362_SAR_MASK
#define BCM6362_RESET_EPHY SOFTRESET_6362_EPHY_MASK
#define BCM6362_RESET_ENETSW SOFTRESET_6362_ENETSW_MASK
#define BCM6362_RESET_PCM SOFTRESET_6362_PCM_MASK
#define BCM6362_RESET_MPI 0
#define BCM6362_RESET_PCIE (SOFTRESET_6362_PCIE_MASK | \
SOFTRESET_6362_PCIE_CORE_MASK)
#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK
#define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK
#define BCM6368_RESET_ENET 0
#define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK
#define BCM6368_RESET_USBD SOFTRESET_6368_USBS_MASK
#define BCM6368_RESET_DSL 0
#define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK
#define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK
#define BCM6368_RESET_ENETSW SOFTRESET_6368_ENETSW_MASK
#define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK
#define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK
#define BCM6368_RESET_PCIE 0
#define BCM6368_RESET_PCIE_EXT 0
/*
* core reset bits
*/
static const u32 bcm3368_reset_bits[] = {
__GEN_RESET_BITS_TABLE(3368)
};
static const u32 bcm6328_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6328)
};
static const u32 bcm6338_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6338)
};
static const u32 bcm6348_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6348)
};
static const u32 bcm6358_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6358)
};
static const u32 bcm6362_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6362)
};
static const u32 bcm6368_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6368)
};
const u32 *bcm63xx_reset_bits;
static int reset_reg;
static int __init bcm63xx_reset_bits_init(void)
{
if (BCMCPU_IS_3368()) {
reset_reg = PERF_SOFTRESET_6358_REG;
bcm63xx_reset_bits = bcm3368_reset_bits;
} else if (BCMCPU_IS_6328()) {
reset_reg = PERF_SOFTRESET_6328_REG;
bcm63xx_reset_bits = bcm6328_reset_bits;
} else if (BCMCPU_IS_6338()) {
reset_reg = PERF_SOFTRESET_REG;
bcm63xx_reset_bits = bcm6338_reset_bits;
} else if (BCMCPU_IS_6348()) {
reset_reg = PERF_SOFTRESET_REG;
bcm63xx_reset_bits = bcm6348_reset_bits;
} else if (BCMCPU_IS_6358()) {
reset_reg = PERF_SOFTRESET_6358_REG;
bcm63xx_reset_bits = bcm6358_reset_bits;
} else if (BCMCPU_IS_6362()) {
reset_reg = PERF_SOFTRESET_6362_REG;
bcm63xx_reset_bits = bcm6362_reset_bits;
} else if (BCMCPU_IS_6368()) {
reset_reg = PERF_SOFTRESET_6368_REG;
bcm63xx_reset_bits = bcm6368_reset_bits;
}
return 0;
}
static DEFINE_SPINLOCK(reset_mutex);
static void __bcm63xx_core_set_reset(u32 mask, int enable)
{
unsigned long flags;
u32 val;
if (!mask)
return;
spin_lock_irqsave(&reset_mutex, flags);
val = bcm_perf_readl(reset_reg);
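	/* softreset bits are active low: clearing a bit asserts the reset */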
if (enable)
val &= ~mask;
else
val |= mask;
bcm_perf_writel(val, reset_reg);
spin_unlock_irqrestore(&reset_mutex, flags);
}
void bcm63xx_core_set_reset(enum bcm63xx_core_reset core, int reset)
{
__bcm63xx_core_set_reset(bcm63xx_reset_bits[core], reset);
}
EXPORT_SYMBOL(bcm63xx_core_set_reset);
postcore_initcall(bcm63xx_reset_bits_init);
| linux-master | arch/mips/bcm63xx/reset.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <asm/reboot.h>
#include <asm/cacheflush.h>
#include <bcm63xx_board.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_gpio.h>
void bcm63xx_machine_halt(void)
{
pr_info("System halted\n");
while (1)
;
}
static void bcm6348_a1_reboot(void)
{
u32 reg;
/* soft reset all blocks */
pr_info("soft-resetting all blocks ...\n");
reg = bcm_perf_readl(PERF_SOFTRESET_REG);
reg &= ~SOFTRESET_6348_ALL;
bcm_perf_writel(reg, PERF_SOFTRESET_REG);
mdelay(10);
reg = bcm_perf_readl(PERF_SOFTRESET_REG);
reg |= SOFTRESET_6348_ALL;
bcm_perf_writel(reg, PERF_SOFTRESET_REG);
mdelay(10);
	/* Jump to the power-on address. */
pr_info("jumping to reset vector.\n");
	/* set high vectors (base at 0xbfc00000) */
set_c0_status(ST0_BEV | ST0_ERL);
/* run uncached in kseg0 */
change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
__flush_cache_all();
/* remove all wired TLB entries */
write_c0_wired(0);
__asm__ __volatile__(
"jr\t%0"
:
: "r" (0xbfc00000));
while (1)
;
}
void bcm63xx_machine_reboot(void)
{
u32 reg, perf_regs[2] = { 0, 0 };
unsigned int i;
/* mask and clear all external irq */
switch (bcm63xx_get_cpu_id()) {
case BCM3368_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_3368;
break;
case BCM6328_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6328;
break;
case BCM6338_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6338;
break;
case BCM6345_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6345;
break;
case BCM6348_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6348;
break;
case BCM6358_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
break;
case BCM6362_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
break;
}
for (i = 0; i < 2; i++) {
if (!perf_regs[i])
break;
reg = bcm_perf_readl(perf_regs[i]);
if (BCMCPU_IS_6348()) {
reg &= ~EXTIRQ_CFG_MASK_ALL_6348;
reg |= EXTIRQ_CFG_CLEAR_ALL_6348;
} else {
reg &= ~EXTIRQ_CFG_MASK_ALL;
reg |= EXTIRQ_CFG_CLEAR_ALL;
}
bcm_perf_writel(reg, perf_regs[i]);
}
if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() == 0xa1))
bcm6348_a1_reboot();
pr_info("triggering watchdog soft-reset...\n");
if (BCMCPU_IS_6328()) {
bcm_wdt_writel(1, WDT_SOFTRESET_REG);
} else {
reg = bcm_perf_readl(PERF_SYS_PLL_CTL_REG);
reg |= SYS_PLL_SOFT_RESET;
bcm_perf_writel(reg, PERF_SYS_PLL_CTL_REG);
}
while (1)
;
}
static void __bcm63xx_machine_reboot(char *p)
{
bcm63xx_machine_reboot();
}
/*
* return system type in /proc/cpuinfo
*/
const char *get_system_type(void)
{
static char buf[128];
snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
board_get_name(),
bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
return buf;
}
void __init plat_time_init(void)
{
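	/* the CP0 count register ticks at half the CPU clock */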
mips_hpt_frequency = bcm63xx_get_cpu_freq() / 2;
}
void __init plat_mem_setup(void)
{
memblock_add(0, bcm63xx_get_memory_size());
_machine_halt = bcm63xx_machine_halt;
_machine_restart = __bcm63xx_machine_reboot;
pm_power_off = bcm63xx_machine_halt;
set_io_port_base(0);
ioport_resource.start = 0;
ioport_resource.end = ~0;
board_setup();
}
int __init bcm63xx_register_devices(void)
{
/* register gpiochip */
bcm63xx_gpio_init();
return board_register_devices();
}
arch_initcall(bcm63xx_register_devices);
| linux-master | arch/mips/bcm63xx/setup.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_reset.h>
struct clk {
void (*set)(struct clk *, int);
unsigned int rate;
unsigned int usage;
int id;
};
static DEFINE_MUTEX(clocks_mutex);
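/* refcounted clock gating: only touch the hardware on 0 <-> 1 transitions */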
static void clk_enable_unlocked(struct clk *clk)
{
if (clk->set && (clk->usage++) == 0)
clk->set(clk, 1);
}
static void clk_disable_unlocked(struct clk *clk)
{
if (clk->set && (--clk->usage) == 0)
clk->set(clk, 0);
}
static void bcm_hwclock_set(u32 mask, int enable)
{
u32 reg;
reg = bcm_perf_readl(PERF_CKCTL_REG);
if (enable)
reg |= mask;
else
reg &= ~mask;
bcm_perf_writel(reg, PERF_CKCTL_REG);
}
/*
* Ethernet MAC "misc" clock: dma clocks and main clock on 6348
*/
static void enet_misc_set(struct clk *clk, int enable)
{
u32 mask;
if (BCMCPU_IS_6338())
mask = CKCTL_6338_ENET_EN;
else if (BCMCPU_IS_6345())
mask = CKCTL_6345_ENET_EN;
else if (BCMCPU_IS_6348())
mask = CKCTL_6348_ENET_EN;
else
/* BCMCPU_IS_6358 */
mask = CKCTL_6358_EMUSB_EN;
bcm_hwclock_set(mask, enable);
}
static struct clk clk_enet_misc = {
.set = enet_misc_set,
};
/*
 * Ethernet MAC clocks: only gated on 3368/6358; silently enable the
 * misc clocks
*/
static void enetx_set(struct clk *clk, int enable)
{
if (enable)
clk_enable_unlocked(&clk_enet_misc);
else
clk_disable_unlocked(&clk_enet_misc);
if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
u32 mask;
if (clk->id == 0)
mask = CKCTL_6358_ENET0_EN;
else
mask = CKCTL_6358_ENET1_EN;
bcm_hwclock_set(mask, enable);
}
}
static struct clk clk_enet0 = {
.id = 0,
.set = enetx_set,
};
static struct clk clk_enet1 = {
.id = 1,
.set = enetx_set,
};
/*
* Ethernet PHY clock
*/
static void ephy_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_3368() || BCMCPU_IS_6358())
bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable);
}
static struct clk clk_ephy = {
.set = ephy_set,
};
/*
* Ethernet switch SAR clock
*/
static void swpkt_sar_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6368())
bcm_hwclock_set(CKCTL_6368_SWPKT_SAR_EN, enable);
else
return;
}
static struct clk clk_swpkt_sar = {
.set = swpkt_sar_set,
};
/*
* Ethernet switch USB clock
*/
static void swpkt_usb_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6368())
bcm_hwclock_set(CKCTL_6368_SWPKT_USB_EN, enable);
else
return;
}
static struct clk clk_swpkt_usb = {
.set = swpkt_usb_set,
};
/*
* Ethernet switch clock
*/
static void enetsw_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6328()) {
bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
} else if (BCMCPU_IS_6362()) {
bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
} else if (BCMCPU_IS_6368()) {
if (enable) {
clk_enable_unlocked(&clk_swpkt_sar);
clk_enable_unlocked(&clk_swpkt_usb);
} else {
clk_disable_unlocked(&clk_swpkt_usb);
clk_disable_unlocked(&clk_swpkt_sar);
}
bcm_hwclock_set(CKCTL_6368_ROBOSW_EN, enable);
} else {
return;
}
if (enable) {
		/* reset switch core after clock change */
bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
msleep(10);
bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0);
msleep(10);
}
}
static struct clk clk_enetsw = {
.set = enetsw_set,
};
/*
* PCM clock
*/
static void pcm_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_3368())
bcm_hwclock_set(CKCTL_3368_PCM_EN, enable);
if (BCMCPU_IS_6358())
bcm_hwclock_set(CKCTL_6358_PCM_EN, enable);
}
static struct clk clk_pcm = {
.set = pcm_set,
};
/*
* USB host clock
*/
static void usbh_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6328())
bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
else if (BCMCPU_IS_6348())
bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
else if (BCMCPU_IS_6362())
bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
else if (BCMCPU_IS_6368())
bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
}
static struct clk clk_usbh = {
.set = usbh_set,
};
/*
* USB device clock
*/
static void usbd_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6328())
bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
else if (BCMCPU_IS_6362())
bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
else if (BCMCPU_IS_6368())
bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
}
static struct clk clk_usbd = {
.set = usbd_set,
};
/*
* SPI clock
*/
static void spi_set(struct clk *clk, int enable)
{
u32 mask;
if (BCMCPU_IS_6338())
mask = CKCTL_6338_SPI_EN;
else if (BCMCPU_IS_6348())
mask = CKCTL_6348_SPI_EN;
else if (BCMCPU_IS_3368() || BCMCPU_IS_6358())
mask = CKCTL_6358_SPI_EN;
else if (BCMCPU_IS_6362())
mask = CKCTL_6362_SPI_EN;
else
/* BCMCPU_IS_6368 */
mask = CKCTL_6368_SPI_EN;
bcm_hwclock_set(mask, enable);
}
static struct clk clk_spi = {
.set = spi_set,
};
/*
* HSSPI clock
*/
static void hsspi_set(struct clk *clk, int enable)
{
u32 mask;
if (BCMCPU_IS_6328())
mask = CKCTL_6328_HSSPI_EN;
else if (BCMCPU_IS_6362())
mask = CKCTL_6362_HSSPI_EN;
else
return;
bcm_hwclock_set(mask, enable);
}
static struct clk clk_hsspi = {
.set = hsspi_set,
};
/*
* HSSPI PLL
*/
static struct clk clk_hsspi_pll;
/*
* XTM clock
*/
static void xtm_set(struct clk *clk, int enable)
{
if (!BCMCPU_IS_6368())
return;
if (enable)
clk_enable_unlocked(&clk_swpkt_sar);
else
clk_disable_unlocked(&clk_swpkt_sar);
bcm_hwclock_set(CKCTL_6368_SAR_EN, enable);
if (enable) {
		/* reset sar core after clock change */
bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1);
mdelay(1);
bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);
mdelay(1);
}
}
static struct clk clk_xtm = {
.set = xtm_set,
};
/*
* IPsec clock
*/
static void ipsec_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6362())
bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
else if (BCMCPU_IS_6368())
bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
}
static struct clk clk_ipsec = {
.set = ipsec_set,
};
/*
* PCIe clock
*/
static void pcie_set(struct clk *clk, int enable)
{
if (BCMCPU_IS_6328())
bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
else if (BCMCPU_IS_6362())
bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
}
static struct clk clk_pcie = {
.set = pcie_set,
};
/*
* Internal peripheral clock
*/
static struct clk clk_periph = {
.rate = (50 * 1000 * 1000),
};
/*
* Linux clock API implementation
*/
int clk_enable(struct clk *clk)
{
if (!clk)
return 0;
mutex_lock(&clocks_mutex);
clk_enable_unlocked(clk);
mutex_unlock(&clocks_mutex);
return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
if (!clk)
return;
mutex_lock(&clocks_mutex);
clk_disable_unlocked(clk);
mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_disable);
struct clk *clk_get_parent(struct clk *clk)
{
return NULL;
}
EXPORT_SYMBOL(clk_get_parent);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
return 0;
}
EXPORT_SYMBOL(clk_set_parent);
unsigned long clk_get_rate(struct clk *clk)
{
if (!clk)
return 0;
return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
return 0;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
return 0;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
static struct clk_lookup bcm3368_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
/* gated clocks */
CLKDEV_INIT(NULL, "enet0", &clk_enet0),
CLKDEV_INIT(NULL, "enet1", &clk_enet1),
CLKDEV_INIT(NULL, "ephy", &clk_ephy),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "spi", &clk_spi),
CLKDEV_INIT(NULL, "pcm", &clk_pcm),
CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
};
static struct clk_lookup bcm6328_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
/* gated clocks */
CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
CLKDEV_INIT(NULL, "pcie", &clk_pcie),
};
static struct clk_lookup bcm6338_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
/* gated clocks */
CLKDEV_INIT(NULL, "enet0", &clk_enet0),
CLKDEV_INIT(NULL, "enet1", &clk_enet1),
CLKDEV_INIT(NULL, "ephy", &clk_ephy),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "spi", &clk_spi),
CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
};
static struct clk_lookup bcm6345_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
/* gated clocks */
CLKDEV_INIT(NULL, "enet0", &clk_enet0),
CLKDEV_INIT(NULL, "enet1", &clk_enet1),
CLKDEV_INIT(NULL, "ephy", &clk_ephy),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "spi", &clk_spi),
CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
};
static struct clk_lookup bcm6348_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
/* gated clocks */
CLKDEV_INIT(NULL, "enet0", &clk_enet0),
CLKDEV_INIT(NULL, "enet1", &clk_enet1),
CLKDEV_INIT(NULL, "ephy", &clk_ephy),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "spi", &clk_spi),
CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet_misc),
};
static struct clk_lookup bcm6358_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
/* gated clocks */
CLKDEV_INIT(NULL, "enet0", &clk_enet0),
CLKDEV_INIT(NULL, "enet1", &clk_enet1),
CLKDEV_INIT(NULL, "ephy", &clk_ephy),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "spi", &clk_spi),
CLKDEV_INIT(NULL, "pcm", &clk_pcm),
CLKDEV_INIT(NULL, "swpkt_sar", &clk_swpkt_sar),
CLKDEV_INIT(NULL, "swpkt_usb", &clk_swpkt_usb),
CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
};
static struct clk_lookup bcm6362_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
/* gated clocks */
CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "spi", &clk_spi),
CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
CLKDEV_INIT(NULL, "pcie", &clk_pcie),
CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
};
static struct clk_lookup bcm6368_clks[] = {
/* fixed rate clocks */
CLKDEV_INIT(NULL, "periph", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
/* gated clocks */
CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
CLKDEV_INIT(NULL, "usbh", &clk_usbh),
CLKDEV_INIT(NULL, "usbd", &clk_usbd),
CLKDEV_INIT(NULL, "spi", &clk_spi),
CLKDEV_INIT(NULL, "xtm", &clk_xtm),
CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
};
#define HSSPI_PLL_HZ_6328 133333333
#define HSSPI_PLL_HZ_6362 400000000
static int __init bcm63xx_clk_init(void)
{
switch (bcm63xx_get_cpu_id()) {
case BCM3368_CPU_ID:
clkdev_add_table(bcm3368_clks, ARRAY_SIZE(bcm3368_clks));
break;
case BCM6328_CPU_ID:
clk_hsspi_pll.rate = HSSPI_PLL_HZ_6328;
clkdev_add_table(bcm6328_clks, ARRAY_SIZE(bcm6328_clks));
break;
case BCM6338_CPU_ID:
clkdev_add_table(bcm6338_clks, ARRAY_SIZE(bcm6338_clks));
break;
case BCM6345_CPU_ID:
clkdev_add_table(bcm6345_clks, ARRAY_SIZE(bcm6345_clks));
break;
case BCM6348_CPU_ID:
clkdev_add_table(bcm6348_clks, ARRAY_SIZE(bcm6348_clks));
break;
case BCM6358_CPU_ID:
clkdev_add_table(bcm6358_clks, ARRAY_SIZE(bcm6358_clks));
break;
case BCM6362_CPU_ID:
clk_hsspi_pll.rate = HSSPI_PLL_HZ_6362;
clkdev_add_table(bcm6362_clks, ARRAY_SIZE(bcm6362_clks));
break;
case BCM6368_CPU_ID:
clkdev_add_table(bcm6368_clks, ARRAY_SIZE(bcm6368_clks));
break;
}
return 0;
}
arch_initcall(bcm63xx_clk_init);
| linux-master | arch/mips/bcm63xx/clk.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <bcm63xx_cpu.h>
static struct resource uart0_resources[] = {
{
/* start & end filled at runtime */
.flags = IORESOURCE_MEM,
},
{
/* start filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct resource uart1_resources[] = {
{
/* start & end filled at runtime */
.flags = IORESOURCE_MEM,
},
{
/* start filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bcm63xx_uart_devices[] = {
{
.name = "bcm63xx_uart",
.id = 0,
.num_resources = ARRAY_SIZE(uart0_resources),
.resource = uart0_resources,
},
{
.name = "bcm63xx_uart",
.id = 1,
.num_resources = ARRAY_SIZE(uart1_resources),
.resource = uart1_resources,
}
};
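/*
 * Board setup code is expected to call this once per UART it uses,
 * e.g. (sketch, assuming a two-UART board):
 *
 *	bcm63xx_uart_register(0);
 *	bcm63xx_uart_register(1);
 */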
int __init bcm63xx_uart_register(unsigned int id)
{
if (id >= ARRAY_SIZE(bcm63xx_uart_devices))
return -ENODEV;
if (id == 1 && (!BCMCPU_IS_3368() && !BCMCPU_IS_6358() &&
!BCMCPU_IS_6368()))
return -ENODEV;
if (id == 0) {
uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0);
uart0_resources[0].end = uart0_resources[0].start +
RSET_UART_SIZE - 1;
uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0);
}
if (id == 1) {
uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1);
uart1_resources[0].end = uart1_resources[0].start +
RSET_UART_SIZE - 1;
uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1);
}
return platform_device_register(&bcm63xx_uart_devices[id]);
}
| linux-master | arch/mips/bcm63xx/dev-uart.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2008 Florian Fainelli <[email protected]>
* Copyright (C) 2012 Jonas Gorski <[email protected]>
*/
#define pr_fmt(fmt) "bcm63xx_nvram: " fmt
#include <linux/bcm963xx_nvram.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <bcm63xx_nvram.h>
#define BCM63XX_DEFAULT_PSI_SIZE 64
static struct bcm963xx_nvram nvram;
static int mac_addr_used;
void __init bcm63xx_nvram_init(void *addr)
{
u32 crc, expected_crc;
u8 hcs_mac_addr[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xff, 0xff, 0xff };
/* extract nvram data */
memcpy(&nvram, addr, BCM963XX_NVRAM_V5_SIZE);
/* check checksum before using data */
if (bcm963xx_nvram_checksum(&nvram, &expected_crc, &crc))
pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
expected_crc, crc);
	/* Cable modems have a different NVRAM, which is embedded in the
	 * eCos firmware and not easily extractable, so give them at least
	 * a MAC address pool.
*/
if (BCMCPU_IS_3368()) {
memcpy(nvram.mac_addr_base, hcs_mac_addr, ETH_ALEN);
nvram.mac_addr_count = 2;
}
}
u8 *bcm63xx_nvram_get_name(void)
{
return nvram.name;
}
EXPORT_SYMBOL(bcm63xx_nvram_get_name);
int bcm63xx_nvram_get_mac_address(u8 *mac)
{
u8 *oui;
int count;
if (mac_addr_used >= nvram.mac_addr_count) {
pr_err("not enough mac addresses\n");
return -ENODEV;
}
memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
oui = mac + ETH_ALEN/2 - 1;
count = mac_addr_used;
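	/*
	 * advance the base address 'mac_addr_used' times, carrying across
	 * the low three bytes; oui points at the last OUI byte, which the
	 * carry must never reach.
	 */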
while (count--) {
u8 *p = mac + ETH_ALEN - 1;
do {
(*p)++;
if (*p != 0)
break;
p--;
} while (p != oui);
if (p == oui) {
pr_err("unable to fetch mac address\n");
return -ENODEV;
}
}
mac_addr_used++;
return 0;
}
EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
int bcm63xx_nvram_get_psi_size(void)
{
if (nvram.psi_size > 0)
return nvram.psi_size;
return BCM63XX_DEFAULT_PSI_SIZE;
}
EXPORT_SYMBOL(bcm63xx_nvram_get_psi_size);
| linux-master | arch/mips/bcm63xx/nvram.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/smp.h>
#include <asm/bootinfo.h>
#include <asm/bmips.h>
#include <asm/smp-ops.h>
#include <asm/mipsregs.h>
#include <bcm63xx_board.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
void __init prom_init(void)
{
u32 reg, mask;
bcm63xx_cpu_init();
/* stop any running watchdog */
bcm_wdt_writel(WDT_STOP_1, WDT_CTL_REG);
bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);
/* disable all hardware blocks clock for now */
if (BCMCPU_IS_3368())
mask = CKCTL_3368_ALL_SAFE_EN;
else if (BCMCPU_IS_6328())
mask = CKCTL_6328_ALL_SAFE_EN;
else if (BCMCPU_IS_6338())
mask = CKCTL_6338_ALL_SAFE_EN;
else if (BCMCPU_IS_6345())
mask = CKCTL_6345_ALL_SAFE_EN;
else if (BCMCPU_IS_6348())
mask = CKCTL_6348_ALL_SAFE_EN;
else if (BCMCPU_IS_6358())
mask = CKCTL_6358_ALL_SAFE_EN;
else if (BCMCPU_IS_6362())
mask = CKCTL_6362_ALL_SAFE_EN;
else if (BCMCPU_IS_6368())
mask = CKCTL_6368_ALL_SAFE_EN;
else
mask = 0;
reg = bcm_perf_readl(PERF_CKCTL_REG);
reg &= ~mask;
bcm_perf_writel(reg, PERF_CKCTL_REG);
/* do low level board init */
board_prom_init();
/* set up SMP */
if (!register_bmips_smp_ops()) {
/*
* BCM6328 might not have its second CPU enabled, while BCM3368
* and BCM6358 need special handling for their shared TLB, so
* disable SMP for now.
*/
if (BCMCPU_IS_6328()) {
reg = bcm_readl(BCM_6328_OTP_BASE +
OTP_USER_BITS_6328_REG(3));
if (reg & OTP_6328_REG3_TP1_DISABLED)
bmips_smp_enabled = 0;
} else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
bmips_smp_enabled = 0;
}
if (!bmips_smp_enabled)
return;
/*
* The bootloader has set up the CPU1 reset vector at
* 0xa000_0200.
* This conflicts with the special interrupt vector (IV).
* The bootloader has also set up CPU1 to respond to the wrong
* IPI interrupt.
* Here we will start up CPU1 in the background and ask it to
* reconfigure itself then go back to sleep.
*/
memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
__sync();
set_c0_cause(C_SW0);
cpumask_set_cpu(1, &bmips_booted_mask);
/*
* FIXME: we really should have some sort of hazard barrier here
*/
}
}
| linux-master | arch/mips/bcm63xx/prom.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Florian Fainelli <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/bcm7038_wdt.h>
#include <bcm63xx_cpu.h>
static struct resource wdt_resources[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
};
static struct bcm7038_wdt_platform_data bcm63xx_wdt_pdata = {
.clk_name = "periph",
};
static struct platform_device bcm63xx_wdt_device = {
.name = "bcm63xx-wdt",
.id = -1,
.num_resources = ARRAY_SIZE(wdt_resources),
.resource = wdt_resources,
.dev = {
.platform_data = &bcm63xx_wdt_pdata,
},
};
int __init bcm63xx_wdt_register(void)
{
wdt_resources[0].start = bcm63xx_regset_address(RSET_WDT);
wdt_resources[0].end = wdt_resources[0].start;
wdt_resources[0].end += RSET_WDT_SIZE - 1;
return platform_device_register(&bcm63xx_wdt_device);
}
arch_initcall(bcm63xx_wdt_register);
| linux-master | arch/mips/bcm63xx/dev-wdt.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
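/*
 * Per-SoC ENET DMA channel register layouts; the 6345 uses a different
 * register map, so the driver indexes DMA registers through whichever
 * table bcm63xx_enetdmac_regs_init() selects at runtime.
 */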
static const unsigned long bcm6348_regs_enetdmac[] = {
[ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG,
[ENETDMAC_IR] = ENETDMAC_IR_REG,
[ENETDMAC_IRMASK] = ENETDMAC_IRMASK_REG,
[ENETDMAC_MAXBURST] = ENETDMAC_MAXBURST_REG,
};
static const unsigned long bcm6345_regs_enetdmac[] = {
[ENETDMAC_CHANCFG] = ENETDMA_6345_CHANCFG_REG,
[ENETDMAC_IR] = ENETDMA_6345_IR_REG,
[ENETDMAC_IRMASK] = ENETDMA_6345_IRMASK_REG,
[ENETDMAC_MAXBURST] = ENETDMA_6345_MAXBURST_REG,
[ENETDMAC_BUFALLOC] = ENETDMA_6345_BUFALLOC_REG,
[ENETDMAC_RSTART] = ENETDMA_6345_RSTART_REG,
[ENETDMAC_FC] = ENETDMA_6345_FC_REG,
[ENETDMAC_LEN] = ENETDMA_6345_LEN_REG,
};
const unsigned long *bcm63xx_regs_enetdmac;
EXPORT_SYMBOL(bcm63xx_regs_enetdmac);
static __init void bcm63xx_enetdmac_regs_init(void)
{
if (BCMCPU_IS_6345())
bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
else
bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
}
static struct resource shared_res[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
};
static struct platform_device bcm63xx_enet_shared_device = {
.name = "bcm63xx_enet_shared",
.id = 0,
.num_resources = ARRAY_SIZE(shared_res),
.resource = shared_res,
};
static int shared_device_registered;
static u64 enet_dmamask = DMA_BIT_MASK(32);
static struct resource enet0_res[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct bcm63xx_enet_platform_data enet0_pd;
static struct platform_device bcm63xx_enet0_device = {
.name = "bcm63xx_enet",
.id = 0,
.num_resources = ARRAY_SIZE(enet0_res),
.resource = enet0_res,
.dev = {
.platform_data = &enet0_pd,
.dma_mask = &enet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static struct resource enet1_res[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct bcm63xx_enet_platform_data enet1_pd;
static struct platform_device bcm63xx_enet1_device = {
.name = "bcm63xx_enet",
.id = 1,
.num_resources = ARRAY_SIZE(enet1_res),
.resource = enet1_res,
.dev = {
.platform_data = &enet1_pd,
.dma_mask = &enet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static struct resource enetsw_res[] = {
{
/* start & end filled at runtime */
.flags = IORESOURCE_MEM,
},
{
/* start filled at runtime */
.flags = IORESOURCE_IRQ,
},
{
/* start filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct bcm63xx_enetsw_platform_data enetsw_pd;
static struct platform_device bcm63xx_enetsw_device = {
.name = "bcm63xx_enetsw",
.num_resources = ARRAY_SIZE(enetsw_res),
.resource = enetsw_res,
.dev = {
.platform_data = &enetsw_pd,
.dma_mask = &enet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static int __init register_shared(void)
{
int ret, chan_count;
if (shared_device_registered)
return 0;
bcm63xx_enetdmac_regs_init();
shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
shared_res[0].end = shared_res[0].start;
if (BCMCPU_IS_6345())
shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
else
shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
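	/* the number of DMA channels exposed differs per SoC generation */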
if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
chan_count = 32;
else if (BCMCPU_IS_6345())
chan_count = 8;
else
chan_count = 16;
shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
shared_res[1].end = shared_res[1].start;
shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1;
shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
shared_res[2].end = shared_res[2].start;
shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1;
ret = platform_device_register(&bcm63xx_enet_shared_device);
if (ret)
return ret;
shared_device_registered = 1;
return 0;
}
int __init bcm63xx_enet_register(int unit,
const struct bcm63xx_enet_platform_data *pd)
{
struct platform_device *pdev;
struct bcm63xx_enet_platform_data *dpd;
int ret;
if (unit > 1)
return -ENODEV;
if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
return -ENODEV;
ret = register_shared();
if (ret)
return ret;
if (unit == 0) {
enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
enet0_res[0].end = enet0_res[0].start;
enet0_res[0].end += RSET_ENET_SIZE - 1;
enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
pdev = &bcm63xx_enet0_device;
} else {
enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
enet1_res[0].end = enet1_res[0].start;
enet1_res[0].end += RSET_ENET_SIZE - 1;
enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
pdev = &bcm63xx_enet1_device;
}
/* copy given platform data */
dpd = pdev->dev.platform_data;
memcpy(dpd, pd, sizeof(*pd));
/* adjust them in case internal phy is used */
if (dpd->use_internal_phy) {
/* internal phy only exists for enet0 */
if (unit == 1)
return -ENODEV;
dpd->phy_id = 1;
dpd->has_phy_interrupt = 1;
dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
}
dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
if (BCMCPU_IS_6345()) {
dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
} else {
dpd->dma_has_sram = true;
dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
}
if (unit == 0) {
dpd->rx_chan = 0;
dpd->tx_chan = 1;
} else {
dpd->rx_chan = 2;
dpd->tx_chan = 3;
}
ret = platform_device_register(pdev);
if (ret)
return ret;
return 0;
}
int __init
bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
{
int ret;
if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368())
return -ENODEV;
ret = register_shared();
if (ret)
return ret;
enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW);
enetsw_res[0].end = enetsw_res[0].start;
enetsw_res[0].end += RSET_ENETSW_SIZE - 1;
enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0);
enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0);
if (!enetsw_res[2].start)
enetsw_res[2].start = -1;
memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd));
if (BCMCPU_IS_6328())
enetsw_pd.num_ports = ENETSW_PORTS_6328;
else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
enetsw_pd.num_ports = ENETSW_PORTS_6368;
enetsw_pd.dma_has_sram = true;
enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
ret = platform_device_register(&bcm63xx_enetsw_device);
if (ret)
return ret;
return 0;
}
| linux-master | arch/mips/bcm63xx/dev-enet.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Jonas Gorski <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_hsspi.h>
#include <bcm63xx_regs.h>
static struct resource spi_resources[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bcm63xx_hsspi_device = {
.name = "bcm63xx-hsspi",
.id = 0,
.num_resources = ARRAY_SIZE(spi_resources),
.resource = spi_resources,
};
int __init bcm63xx_hsspi_register(void)
{
if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362())
return -ENODEV;
spi_resources[0].start = bcm63xx_regset_address(RSET_HSSPI);
spi_resources[0].end = spi_resources[0].start;
spi_resources[0].end += RSET_HSSPI_SIZE - 1;
spi_resources[1].start = bcm63xx_get_irq_number(IRQ_HSSPI);
return platform_device_register(&bcm63xx_hsspi_device);
}
| linux-master | arch/mips/bcm63xx/dev-hsspi.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2009-2011 Florian Fainelli <[email protected]>
* Copyright (C) 2010 Tanguy Bouzeloc <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_spi.h>
#include <bcm63xx_regs.h>
static struct resource spi_resources[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bcm63xx_spi_device = {
.id = -1,
.num_resources = ARRAY_SIZE(spi_resources),
.resource = spi_resources,
};
int __init bcm63xx_spi_register(void)
{
if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
return -ENODEV;
spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
spi_resources[0].end = spi_resources[0].start;
spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
		bcm63xx_spi_device.name = "bcm6348-spi";
spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
}
if (BCMCPU_IS_3368() || BCMCPU_IS_6358() || BCMCPU_IS_6362() ||
BCMCPU_IS_6368()) {
		bcm63xx_spi_device.name = "bcm6358-spi";
spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
}
return platform_device_register(&bcm63xx_spi_device);
}
| linux-master | arch/mips/bcm63xx/dev-spi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 Maxime Bizon <[email protected]>
* Copyright (C) 2008 Florian Fainelli <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/ssb/ssb.h>
#include <asm/addrspace.h>
#include <bcm63xx_board.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_uart.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_nvram.h>
#include <bcm63xx_dev_pci.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_dev_flash.h>
#include <bcm63xx_dev_hsspi.h>
#include <bcm63xx_dev_pcmcia.h>
#include <bcm63xx_dev_spi.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <board_bcm963xx.h>
#include <uapi/linux/bcm933xx_hcs.h>
#define HCS_OFFSET_128K 0x20000
static struct board_info board;
/*
* known 3368 boards
*/
#ifdef CONFIG_BCM63XX_CPU_3368
static struct board_info __initdata board_cvg834g = {
.name = "CVG834G_E15R3921",
.expected_cpu_id = 0x3368,
.ephy_reset_gpio = 36,
.ephy_reset_gpio_flags = GPIOF_INIT_HIGH,
.has_pci = 1,
.has_uart0 = 1,
.has_uart1 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.leds = {
{
.name = "CVG834G:green:power",
.gpio = 37,
.default_trigger= "default-on",
},
},
};
#endif /* CONFIG_BCM63XX_CPU_3368 */
/*
* known 6328 boards
*/
#ifdef CONFIG_BCM63XX_CPU_6328
static struct board_info __initdata board_96328avng = {
.name = "96328avng",
.expected_cpu_id = 0x6328,
.has_pci = 1,
.has_uart0 = 1,
.has_usbd = 0,
.usbd = {
.use_fullspeed = 0,
.port_no = 0,
},
.leds = {
{
.name = "96328avng::ppp-fail",
.gpio = 2,
.active_low = 1,
},
{
.name = "96328avng::power",
.gpio = 4,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "96328avng::power-fail",
.gpio = 8,
.active_low = 1,
},
{
.name = "96328avng::wps",
.gpio = 9,
.active_low = 1,
},
{
.name = "96328avng::ppp",
.gpio = 11,
.active_low = 1,
},
},
};
#endif /* CONFIG_BCM63XX_CPU_6328 */
/*
* known 6338 boards
*/
#ifdef CONFIG_BCM63XX_CPU_6338
static struct board_info __initdata board_96338gw = {
.name = "96338GW",
.expected_cpu_id = 0x6338,
.has_ohci0 = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
.leds = {
{
.name = "adsl",
.gpio = 3,
.active_low = 1,
},
{
.name = "ses",
.gpio = 5,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 4,
.active_low = 1,
},
{
.name = "power",
.gpio = 0,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 1,
.active_low = 1,
}
},
};
static struct board_info __initdata board_96338w = {
.name = "96338W",
.expected_cpu_id = 0x6338,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
.leds = {
{
.name = "adsl",
.gpio = 3,
.active_low = 1,
},
{
.name = "ses",
.gpio = 5,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 4,
.active_low = 1,
},
{
.name = "power",
.gpio = 0,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 1,
.active_low = 1,
},
},
};
#endif /* CONFIG_BCM63XX_CPU_6338 */
/*
* known 6345 boards
*/
#ifdef CONFIG_BCM63XX_CPU_6345
static struct board_info __initdata board_96345gw2 = {
.name = "96345GW2",
.expected_cpu_id = 0x6345,
.has_uart0 = 1,
};
#endif /* CONFIG_BCM63XX_CPU_6345 */
/*
* known 6348 boards
*/
#ifdef CONFIG_BCM63XX_CPU_6348
static struct board_info __initdata board_96348r = {
.name = "96348R",
.expected_cpu_id = 0x6348,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.leds = {
{
.name = "adsl-fail",
.gpio = 2,
.active_low = 1,
},
{
.name = "ppp",
.gpio = 3,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 4,
.active_low = 1,
},
{
.name = "power",
.gpio = 0,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 1,
.active_low = 1,
},
},
};
static struct board_info __initdata board_96348gw_10 = {
.name = "96348GW-10",
.expected_cpu_id = 0x6348,
.has_ohci0 = 1,
.has_pccard = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
.leds = {
{
.name = "adsl-fail",
.gpio = 2,
.active_low = 1,
},
{
.name = "ppp",
.gpio = 3,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 4,
.active_low = 1,
},
{
.name = "power",
.gpio = 0,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 1,
.active_low = 1,
},
},
};
static struct board_info __initdata board_96348gw_11 = {
.name = "96348GW-11",
.expected_cpu_id = 0x6348,
.has_ohci0 = 1,
.has_pccard = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
.leds = {
{
.name = "adsl-fail",
.gpio = 2,
.active_low = 1,
},
{
.name = "ppp",
.gpio = 3,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 4,
.active_low = 1,
},
{
.name = "power",
.gpio = 0,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 1,
.active_low = 1,
},
},
};
static struct board_info __initdata board_96348gw = {
.name = "96348GW",
.expected_cpu_id = 0x6348,
.has_ohci0 = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
.leds = {
{
.name = "adsl-fail",
.gpio = 2,
.active_low = 1,
},
{
.name = "ppp",
.gpio = 3,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 4,
.active_low = 1,
},
{
.name = "power",
.gpio = 0,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 1,
.active_low = 1,
},
},
};
static struct board_info __initdata board_FAST2404 = {
.name = "F@ST2404",
.expected_cpu_id = 0x6348,
.has_ohci0 = 1,
.has_pccard = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
};
static struct board_info __initdata board_rta1025w_16 = {
.name = "RTA1025W_16",
.expected_cpu_id = 0x6348,
.has_pci = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
};
static struct board_info __initdata board_DV201AMR = {
.name = "DV201AMR",
.expected_cpu_id = 0x6348,
.has_ohci0 = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
};
static struct board_info __initdata board_96348gw_a = {
.name = "96348GW-A",
.expected_cpu_id = 0x6348,
.has_ohci0 = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
};
#endif /* CONFIG_BCM63XX_CPU_6348 */
/*
* known 6358 boards
*/
#ifdef CONFIG_BCM63XX_CPU_6358
static struct board_info __initdata board_96358vw = {
.name = "96358VW",
.expected_cpu_id = 0x6358,
.has_ehci0 = 1,
.has_ohci0 = 1,
.has_pccard = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
.leds = {
{
.name = "adsl-fail",
.gpio = 15,
.active_low = 1,
},
{
.name = "ppp",
.gpio = 22,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 23,
.active_low = 1,
},
{
.name = "power",
.gpio = 4,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 5,
},
},
};
static struct board_info __initdata board_96358vw2 = {
.name = "96358VW2",
.expected_cpu_id = 0x6358,
.has_ehci0 = 1,
.has_ohci0 = 1,
.has_pccard = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
.leds = {
{
.name = "adsl",
.gpio = 22,
.active_low = 1,
},
{
.name = "ppp-fail",
.gpio = 23,
},
{
.name = "power",
.gpio = 5,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "stop",
.gpio = 4,
.active_low = 1,
},
},
};
static struct board_info __initdata board_AGPFS0 = {
.name = "AGPF-S0",
.expected_cpu_id = 0x6358,
.has_ehci0 = 1,
.has_ohci0 = 1,
.has_pci = 1,
.has_uart0 = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
};
static struct board_info __initdata board_DWVS0 = {
.name = "DWV-S0",
.expected_cpu_id = 0x6358,
.has_ehci0 = 1,
.has_ohci0 = 1,
.has_pci = 1,
.has_enet0 = 1,
.enet0 = {
.has_phy = 1,
.use_internal_phy = 1,
},
.has_enet1 = 1,
.enet1 = {
.force_speed_100 = 1,
.force_duplex_full = 1,
},
};
#endif /* CONFIG_BCM63XX_CPU_6358 */
/*
* all boards
*/
static const struct board_info __initconst *bcm963xx_boards[] = {
#ifdef CONFIG_BCM63XX_CPU_3368
&board_cvg834g,
#endif /* CONFIG_BCM63XX_CPU_3368 */
#ifdef CONFIG_BCM63XX_CPU_6328
&board_96328avng,
#endif /* CONFIG_BCM63XX_CPU_6328 */
#ifdef CONFIG_BCM63XX_CPU_6338
&board_96338gw,
&board_96338w,
#endif /* CONFIG_BCM63XX_CPU_6338 */
#ifdef CONFIG_BCM63XX_CPU_6345
&board_96345gw2,
#endif /* CONFIG_BCM63XX_CPU_6345 */
#ifdef CONFIG_BCM63XX_CPU_6348
&board_96348r,
&board_96348gw,
&board_96348gw_10,
&board_96348gw_11,
&board_FAST2404,
&board_DV201AMR,
&board_96348gw_a,
&board_rta1025w_16,
#endif /* CONFIG_BCM63XX_CPU_6348 */
#ifdef CONFIG_BCM63XX_CPU_6358
&board_96358vw,
&board_96358vw2,
&board_AGPFS0,
&board_DWVS0,
#endif /* CONFIG_BCM63XX_CPU_6358 */
};
/*
* Register a sane SPROMv2 to make the on-board
* bcm4318 WLAN work
*/
#ifdef CONFIG_SSB_PCIHOST
static struct ssb_sprom bcm63xx_sprom = {
.revision = 0x02,
.board_rev = 0x17,
.country_code = 0x0,
.ant_available_bg = 0x3,
.pa0b0 = 0x15ae,
.pa0b1 = 0xfa85,
.pa0b2 = 0xfe8d,
.pa1b0 = 0xffff,
.pa1b1 = 0xffff,
.pa1b2 = 0xffff,
.gpio0 = 0xff,
.gpio1 = 0xff,
.gpio2 = 0xff,
.gpio3 = 0xff,
.maxpwr_bg = 0x004c,
.itssi_bg = 0x00,
.boardflags_lo = 0x2848,
.boardflags_hi = 0x0000,
};
int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
if (bus->bustype == SSB_BUSTYPE_PCI) {
memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom));
return 0;
} else {
pr_err("unable to fill SPROM for given bustype\n");
return -EINVAL;
}
}
#endif /* CONFIG_SSB_PCIHOST */
/*
* return board name for /proc/cpuinfo
*/
const char *board_get_name(void)
{
return board.name;
}
/*
* early init callback, read nvram data from flash and checksum it
*/
void __init board_prom_init(void)
{
unsigned int i;
u8 *boot_addr, *cfe;
char cfe_version[32];
char *board_name = NULL;
u32 val;
struct bcm_hcs *hcs;
/* read base address of boot chip select (0)
* 6328/6362 do not have MPI but boot from a fixed address
*/
if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
val = 0x18000000;
} else {
val = bcm_mpi_readl(MPI_CSBASE_REG(0));
val &= MPI_CSBASE_BASE_MASK;
}
boot_addr = (u8 *)KSEG1ADDR(val);
/* dump cfe version */
cfe = boot_addr + BCM963XX_CFE_VERSION_OFFSET;
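	/*
	 * The version field is either a plain string ("cfe-vd..." or
	 * anything not starting with 'v' after "cfe-") or five/six raw
	 * version bytes following "cfe-v".
	 */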
if (strstarts(cfe, "cfe-")) {
		if (cfe[4] == 'v') {
			if (cfe[5] == 'd')
snprintf(cfe_version, 11, "%s",
(char *) &cfe[5]);
else if (cfe[10] > 0)
snprintf(cfe_version, sizeof(cfe_version),
"%u.%u.%u-%u.%u-%u", cfe[5], cfe[6],
cfe[7], cfe[8], cfe[9], cfe[10]);
else
snprintf(cfe_version, sizeof(cfe_version),
"%u.%u.%u-%u.%u", cfe[5], cfe[6],
cfe[7], cfe[8], cfe[9]);
} else {
snprintf(cfe_version, 12, "%s", (char *) &cfe[4]);
}
} else {
strcpy(cfe_version, "unknown");
}
pr_info("CFE version: %s\n", cfe_version);
bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
if (BCMCPU_IS_3368()) {
hcs = (struct bcm_hcs *)boot_addr;
board_name = hcs->filename;
} else {
board_name = bcm63xx_nvram_get_name();
}
/* find board by name */
for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) {
if (strncmp(board_name, bcm963xx_boards[i]->name, 16))
continue;
/* copy, board desc array is marked initdata */
memcpy(&board, bcm963xx_boards[i], sizeof(board));
break;
}
/* bail out if board is not found, will complain later */
if (!board.name[0]) {
char name[17];
memcpy(name, board_name, 16);
name[16] = 0;
pr_err("unknown bcm963xx board: %s\n", name);
return;
}
	/* set up pin multiplexing depending on which devices the board
	 * enables; this has to be done this early since PCI init is done
	 * inside an arch_initcall */
val = 0;
#ifdef CONFIG_PCI
if (board.has_pci) {
bcm63xx_pci_enabled = 1;
if (BCMCPU_IS_6348())
val |= GPIO_MODE_6348_G2_PCI;
}
#endif /* CONFIG_PCI */
if (board.has_pccard) {
if (BCMCPU_IS_6348())
val |= GPIO_MODE_6348_G1_MII_PCCARD;
}
if (board.has_enet0 && !board.enet0.use_internal_phy) {
if (BCMCPU_IS_6348())
val |= GPIO_MODE_6348_G3_EXT_MII |
GPIO_MODE_6348_G0_EXT_MII;
}
if (board.has_enet1 && !board.enet1.use_internal_phy) {
if (BCMCPU_IS_6348())
val |= GPIO_MODE_6348_G3_EXT_MII |
GPIO_MODE_6348_G0_EXT_MII;
}
bcm_gpio_writel(val, GPIO_MODE_REG);
}
/*
* second stage init callback, good time to panic if we couldn't
* identify on which board we're running since early printk is working
*/
void __init board_setup(void)
{
if (!board.name[0])
panic("unable to detect bcm963xx board");
pr_info("board name: %s\n", board.name);
/* make sure we're running on expected cpu */
if (bcm63xx_get_cpu_id() != board.expected_cpu_id)
panic("unexpected CPU for bcm963xx board");
}
static struct gpio_led_platform_data bcm63xx_led_data;
static struct platform_device bcm63xx_gpio_leds = {
.name = "leds-gpio",
.id = 0,
.dev.platform_data = &bcm63xx_led_data,
};
/*
* third stage init callback, register all board devices.
*/
int __init board_register_devices(void)
{
if (board.has_uart0)
bcm63xx_uart_register(0);
if (board.has_uart1)
bcm63xx_uart_register(1);
if (board.has_pccard)
bcm63xx_pcmcia_register();
if (board.has_enet0 &&
!bcm63xx_nvram_get_mac_address(board.enet0.mac_addr))
bcm63xx_enet_register(0, &board.enet0);
if (board.has_enet1 &&
!bcm63xx_nvram_get_mac_address(board.enet1.mac_addr))
bcm63xx_enet_register(1, &board.enet1);
if (board.has_enetsw &&
!bcm63xx_nvram_get_mac_address(board.enetsw.mac_addr))
bcm63xx_enetsw_register(&board.enetsw);
if (board.has_usbd)
bcm63xx_usbd_register(&board.usbd);
/* Generate MAC address for WLAN and register our SPROM,
* do this after registering enet devices
*/
#ifdef CONFIG_SSB_PCIHOST
if (!bcm63xx_nvram_get_mac_address(bcm63xx_sprom.il0mac)) {
memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
if (ssb_arch_register_fallback_sprom(
&bcm63xx_get_fallback_sprom) < 0)
pr_err("failed to register fallback SPROM\n");
}
#endif /* CONFIG_SSB_PCIHOST */
bcm63xx_spi_register();
bcm63xx_hsspi_register();
bcm63xx_flash_register();
bcm63xx_led_data.num_leds = ARRAY_SIZE(board.leds);
bcm63xx_led_data.leds = board.leds;
platform_device_register(&bcm63xx_gpio_leds);
if (board.ephy_reset_gpio && board.ephy_reset_gpio_flags)
gpio_request_one(board.ephy_reset_gpio,
board.ephy_reset_gpio_flags, "ephy-reset");
return 0;
}
| linux-master | arch/mips/bcm63xx/boards/board_bcm963xx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
* Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <[email protected]>
*
* Thanks goes to Steven Rostedt for writing the original x86 version.
*/
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>
#include <asm-generic/sections.h>
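/*
 * Number of instructions to skip at a module's mcount call site, i.e.
 * the "b 1f" offset documented in the calling-site layout comment
 * above ftrace_make_nop() below.
 */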
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)
#define INSN_NOP 0x00000000 /* nop */
#define INSN_JAL(addr) \
((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
static inline void ftrace_dyn_arch_init_insns(void)
{
u32 *buf;
unsigned int v1;
/* la v1, _mcount */
v1 = 3;
buf = (u32 *)&insn_la_mcount[0];
UASM_i_LA(&buf, v1, MCOUNT_ADDR);
	/* jal (ftrace_caller + 8), jump over the first two instructions */
buf = (u32 *)&insn_jal_ftrace_caller;
uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* j ftrace_graph_caller */
buf = (u32 *)&insn_j_ftrace_graph_caller;
uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
/* *(unsigned int *)ip = new_code; */
safe_store_code(new_code, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
flush_icache_range(ip, ip + 8);
return 0;
}
#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip += 4;
safe_store_code(new_code2, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip -= 4;
flush_icache_range(ip, ip + 8);
return 0;
}
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
ip += 4;
safe_store_code(new_code2, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip -= 4;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
flush_icache_range(ip, ip + 8);
return 0;
}
#endif
/*
* The details about the calling site of mcount on MIPS
*
* 1. For kernel:
*
* move at, ra
* jal _mcount --> nop
* sub sp, sp, 8 --> nop (CONFIG_32BIT)
*
* 2. For modules:
*
* 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
* addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* move $12, ra_address
* jalr v1
* sub sp, sp, 8
* 1: offset = 5 instructions
* 2.2 For the Other situations
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
* addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* jalr v1
* nop | move $12, ra_address | sub sp, sp, 8
* 1: offset = 4 instructions
*/
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
unsigned long ip = rec->ip;
/*
* If ip is in kernel space, no long call, otherwise, long call is
* needed.
*/
new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
/*
* On 32 bit MIPS platforms, gcc adds a stack adjust
* instruction in the delay slot after the branch to
* mcount and expects mcount to restore the sp on return.
* This is based on a legacy API and does nothing but
* waste instructions so it's being removed at runtime.
*/
return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
unsigned long ip = rec->ip;
new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
INSN_NOP : insn_la_mcount[1]);
#endif
}
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned int new;
new = INSN_JAL((unsigned long)func);
return ftrace_modify_code(FTRACE_CALL_IP, new);
}
int __init ftrace_dyn_arch_init(void)
{
/* Encode the instructions when booting */
ftrace_dyn_arch_init_insns();
/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
insn_j_ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifndef KBUILD_MCOUNT_RA_ADDRESS
#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
unsigned long sp, ip, tmp;
unsigned int code;
int faulted;
/*
* For module, move the ip from the return address after the
* instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
* kernel, move after the instruction "move ra, at"(offset is 16)
*/
ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
/*
* search the text until finding the non-store instruction or "s{d,w}
* ra, offset(sp)" instruction
*/
do {
/* get the code at "ip": code = *(unsigned int *)ip; */
safe_load_code(code, ip, faulted);
if (unlikely(faulted))
return 0;
/*
* If we hit the non-store instruction before finding where the
* ra is stored, then this is a leaf function and it does not
* store the ra on the stack
*/
if ((code & S_R_SP) != S_R_SP)
return parent_ra_addr;
/* Move to the next instruction */
ip -= 4;
} while ((code & S_RA_SP) != S_RA_SP);
sp = fp + (code & OFFSET_MASK);
/* tmp = *(unsigned long *)sp; */
safe_load_stack(tmp, sp, faulted);
if (unlikely(faulted))
return 0;
if (tmp == old_parent_ra)
return sp;
return 0;
}
#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
unsigned long fp)
{
unsigned long old_parent_ra;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
int faulted, insns;
if (unlikely(ftrace_graph_is_dead()))
return;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
/*
* "parent_ra_addr" is the stack address where the return address of
* the caller of _mcount is saved.
*
* If gcc < 4.5, a leaf function does not save the return address
* in the stack address, so we "emulate" one in _mcount's stack space,
* and hijack it directly.
* For a non-leaf function, it does save the return address to its own
	 * stack space, so we cannot hijack it directly, but need to find the
	 * real stack address, which is done by ftrace_get_parent_ra_addr().
*
* If gcc >= 4.5, with the new -mmcount-ra-address option, for a
* non-leaf function, the location of the return address will be saved
* to $12 for us.
* For a leaf function, it just puts a zero into $12, so we handle
* it in ftrace_graph_caller() of mcount.S.
*/
/* old_parent_ra = *parent_ra_addr; */
safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
if (unlikely(faulted))
goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
old_parent_ra, (unsigned long)parent_ra_addr, fp);
/*
	 * If getting the stack address of the non-leaf function's ra fails,
	 * stop the function graph tracer and return
*/
if (parent_ra_addr == NULL)
goto out;
#endif
/* *parent_ra_addr = return_hooker; */
safe_store_stack(return_hooker, parent_ra_addr, faulted);
if (unlikely(faulted))
goto out;
/*
* Get the recorded ip of the current mcount calling site in the
* __mcount_loc section, which will be used to filter the function
* entries configured through the tracing/set_graph_function interface.
*/
insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
self_ra -= (MCOUNT_INSN_SIZE * insns);
if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
*parent_ra_addr = old_parent_ra;
return;
out:
ftrace_graph_stop();
WARN_ON(1);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif
#ifdef CONFIG_64BIT
unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif
return (unsigned long) &sys_ni_syscall;
}
#endif
#endif /* CONFIG_FTRACE_SYSCALLS */
| linux-master | arch/mips/kernel/ftrace.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 by Ralf Baechle
*/
#include <linux/clocksource.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/sched_clock.h>
#include <asm/time.h>
static u64 c0_hpt_read(struct clocksource *cs)
{
return read_c0_count();
}
static struct clocksource clocksource_mips = {
.name = "MIPS",
.read = c0_hpt_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
return read_c0_count();
}
static inline unsigned int rdhwr_count(void)
{
unsigned int count;
__asm__ __volatile__(
" .set push\n"
" .set mips32r2\n"
" rdhwr %0, $2\n"
" .set pop\n"
: "=r" (count));
return count;
}
static bool rdhwr_count_usable(void)
{
unsigned int prev, curr, i;
/*
* Older QEMUs have a broken implementation of RDHWR for the CP0 count
* which always returns a constant value. Try to identify this and don't
* use it in the VDSO if it is broken. This workaround can be removed
* once the fix has been in QEMU stable for a reasonable amount of time.
*/
for (i = 0, prev = rdhwr_count(); i < 100; i++) {
curr = rdhwr_count();
if (curr != prev)
return true;
prev = curr;
}
pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
return false;
}
#ifdef CONFIG_CPU_FREQ
static bool __read_mostly r4k_clock_unstable;
static void r4k_clocksource_unstable(char *reason)
{
if (r4k_clock_unstable)
return;
r4k_clock_unstable = true;
pr_info("R4K timer is unstable due to %s\n", reason);
clocksource_mark_unstable(&clocksource_mips);
}
static int r4k_cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
if (val == CPUFREQ_POSTCHANGE)
r4k_clocksource_unstable("CPU frequency change");
return 0;
}
static struct notifier_block r4k_cpufreq_notifier = {
.notifier_call = r4k_cpufreq_callback,
};
static int __init r4k_register_cpufreq_notifier(void)
{
return cpufreq_register_notifier(&r4k_cpufreq_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);
#endif /* CONFIG_CPU_FREQ */
int __init init_r4k_clocksource(void)
{
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
/* Calculate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
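	/* e.g. a 200 MHz count register yields a rating of 220 */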
/*
* R2 onwards makes the count accessible to user mode so it can be used
* by the VDSO (HWREna is configured by configure_hwrena()).
*/
if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
#ifndef CONFIG_CPU_FREQ
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
#endif
return 0;
}
| linux-master | arch/mips/kernel/csrc-r4k.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002, 2003, 06, 07 Ralf Baechle ([email protected])
* Copyright (C) 2007 MIPS Technologies, Inc.
* written by Ralf Baechle ([email protected])
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/printk.h>
#include <linux/init.h>
#include <asm/setup.h>
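/* Write @s to the platform boot console, expanding '\n' to "\r\n" */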
static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
if (*s == '\n')
prom_putchar('\r');
prom_putchar(*s);
s++;
}
}
static struct console early_console_prom = {
.name = "early",
.write = early_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
void __init setup_early_printk(void)
{
if (early_console)
return;
early_console = &early_console_prom;
register_console(&early_console_prom);
}
| linux-master | arch/mips/kernel/early_printk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000,2001,2004 Broadcom Corporation
*/
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/sb1250.h>
static u64 bcm1480_hpt_read(struct clocksource *cs)
{
return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
}
struct clocksource bcm1480_clocksource = {
.name = "zbbus-cycles",
.rating = 200,
.read = bcm1480_hpt_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static u64 notrace sb1480_read_sched_clock(void)
{
return __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
}
void __init sb1480_clocksource_init(void)
{
struct clocksource *cs = &bcm1480_clocksource;
unsigned int plldiv;
unsigned long zbbus;
plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
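	/* i.e. zbbus = plldiv * 25 MHz */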
clocksource_register_hz(cs, zbbus);
sched_clock_register(sb1480_read_sched_clock, 64, zbbus);
}
| linux-master | arch/mips/kernel/csrc-bcm1480.c |
// SPDX-License-Identifier: GPL-2.0
/*
* i8253.c 8253/PIT functions
*
*/
#include <linux/clockchips.h>
#include <linux/i8253.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/time.h>
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
i8253_clockevent.event_handler(&i8253_clockevent);
return IRQ_HANDLED;
}
void __init setup_pit_timer(void)
{
unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER;
clockevent_i8253_init(true);
if (request_irq(0, timer_interrupt, flags, "timer", NULL))
pr_err("Failed to request irq 0 (timer)\n");
}
static int __init init_pit_clocksource(void)
{
if (num_possible_cpus() > 1 || /* PIT does not scale! */
!clockevent_state_periodic(&i8253_clockevent))
return 0;
return clocksource_i8253_init();
}
arch_initcall(init_pit_clocksource);
| linux-master | arch/mips/kernel/i8253.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <asm/mips-cps.h>
void __iomem *mips_cpc_base;
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
phys_addr_t __weak mips_cpc_default_phys_base(void)
{
struct device_node *cpc_node;
struct resource res;
int err;
cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
if (cpc_node) {
err = of_address_to_resource(cpc_node, 0, &res);
of_node_put(cpc_node);
if (!err)
return res.start;
}
return 0;
}
/**
* mips_cpc_phys_base - retrieve the physical base address of the CPC
*
* This function returns the physical base address of the Cluster Power
* Controller memory mapped registers, or 0 if no Cluster Power Controller
* is present.
*/
static phys_addr_t mips_cpc_phys_base(void)
{
unsigned long cpc_base;
if (!mips_cm_present())
return 0;
if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
return 0;
/* If the CPC is already enabled, leave it so */
cpc_base = read_gcr_cpc_base();
if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
/* Otherwise, use the default address */
cpc_base = mips_cpc_default_phys_base();
if (!cpc_base)
return cpc_base;
/* Enable the CPC, mapped at the default address */
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
return cpc_base;
}
int mips_cpc_probe(void)
{
phys_addr_t addr;
unsigned int cpu;
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cpc_core_lock, cpu));
addr = mips_cpc_phys_base();
if (!addr)
return -ENODEV;
mips_cpc_base = ioremap(addr, 0x8000);
if (!mips_cpc_base)
return -ENXIO;
return 0;
}
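/*
 * Redirect the CPC core-other region at @core. Must be balanced by a
 * matching mips_cpc_unlock_other(); the per-core spinlock serialises
 * users on the same core while preemption stays disabled.
 */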
void mips_cpc_lock_other(unsigned int core)
{
unsigned int curr_core;
if (mips_cm_revision() >= CM_REV_CM3)
/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
return;
preempt_disable();
	curr_core = cpu_core(&current_cpu_data);
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
write_cpc_cl_other(FIELD_PREP(CPC_Cx_OTHER_CORENUM, core));
/*
* Ensure the core-other region reflects the appropriate core &
* VP before any accesses to it occur.
*/
mb();
}
void mips_cpc_unlock_other(void)
{
unsigned int curr_core;
if (mips_cm_revision() >= CM_REV_CM3)
/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
return;
	curr_core = cpu_core(&current_cpu_data);
spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
preempt_enable();
}
| linux-master | arch/mips/kernel/mips-cpc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000,2001,2004 Broadcom Corporation
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/sb1250.h>
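/* interrupt map values routing an IRQ source onto CPU lines IP[2..4] */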
#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
/*
 * The general purpose timer ticks at 1MHz independently of
 * the rest of the system
*/
static int sibyte_set_periodic(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
__raw_writeq(0, cfg);
__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
return 0;
}
static int sibyte_shutdown(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
/* Stop the timer until we actually program a shot */
__raw_writeq(0, cfg);
return 0;
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
__raw_writeq(0, cfg);
__raw_writeq(delta - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE, cfg);
return 0;
}
static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = dev_id;
void __iomem *cfg;
unsigned long tmode;
if (clockevent_state_periodic(cd))
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
/* ACK interrupt */
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
____raw_writeq(tmode, cfg);
cd->event_handler(cd);
return IRQ_HANDLED;
}
static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
void sb1480_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
sprintf(name, "bcm1480-counter-%d", cpu);
cd->name = name;
cd->features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
cd->max_delta_ticks = 0x7fffff;
cd->min_delta_ns = clockevent_delta2ns(2, cd);
cd->min_delta_ticks = 2;
cd->rating = 200;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
cd->set_state_shutdown = sibyte_shutdown;
cd->set_state_periodic = sibyte_set_periodic;
cd->set_state_oneshot = sibyte_shutdown;
clockevents_register_device(cd);
bcm1480_mask_irq(cpu, irq);
/*
* Map the timer interrupt to IP[4] of this cpu
*/
__raw_writeq(IMR_IP4_VAL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu,
R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));
bcm1480_unmask_irq(cpu, irq);
irq_set_affinity(irq, cpumask_of(cpu));
if (request_irq(irq, sibyte_counter_handler, flags, name, cd))
pr_err("Failed to request irq %d (%s)\n", irq, name);
}
| linux-master | arch/mips/kernel/cevt-bcm1480.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Based on linux/arch/mips/kernel/cevt-r4k.c,
* linux/arch/mips/jmr3927/rbhma3100/setup.c
*
* Copyright 2001 MontaVista Software Inc.
* Copyright (C) 2000-2001 Toshiba Corporation
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/sched_clock.h>
#include <asm/time.h>
#include <asm/txx9tmr.h>
#define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL)
#define TIMER_CCD 0 /* 1/2 */
#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD))
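/* with TIMER_CCD == 0 the timers run at imclk / 2 */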
struct txx9_clocksource {
struct clocksource cs;
struct txx9_tmr_reg __iomem *tmrptr;
};
static u64 txx9_cs_read(struct clocksource *cs)
{
struct txx9_clocksource *txx9_cs =
container_of(cs, struct txx9_clocksource, cs);
return __raw_readl(&txx9_cs->tmrptr->trr);
}
/* Use a 1 bit smaller width so the full range of that width is usable */
#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)
static struct txx9_clocksource txx9_clocksource = {
.cs = {
.name = "TXx9",
.rating = 200,
.read = txx9_cs_read,
.mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
static u64 notrace txx9_read_sched_clock(void)
{
return __raw_readl(&txx9_clocksource.tmrptr->trr);
}
void __init txx9_clocksource_init(unsigned long baseaddr,
unsigned int imbusclk)
{
struct txx9_tmr_reg __iomem *tmrptr;
clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk));
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
__raw_writel(TCR_BASE, &tmrptr->tcr);
__raw_writel(0, &tmrptr->tisr);
__raw_writel(TIMER_CCD, &tmrptr->ccdr);
__raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr);
__raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
txx9_clocksource.tmrptr = tmrptr;
sched_clock_register(txx9_read_sched_clock, TXX9_CLOCKSOURCE_BITS,
TIMER_CLK(imbusclk));
}
struct txx9_clock_event_device {
struct clock_event_device cd;
struct txx9_tmr_reg __iomem *tmrptr;
};
static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
{
/* stop and reset counter */
__raw_writel(TCR_BASE, &tmrptr->tcr);
/* clear pending interrupt */
__raw_writel(0, &tmrptr->tisr);
}
static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
/* start timer */
__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
&tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
return 0;
}
static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
return 0;
}
static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
__raw_writel(0, &tmrptr->itmr);
return 0;
}
static int txx9tmr_tick_resume(struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
__raw_writel(TIMER_CCD, &tmrptr->ccdr);
__raw_writel(0, &tmrptr->itmr);
return 0;
}
static int txx9tmr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
/* start timer */
__raw_writel(delta, &tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
return 0;
}
static struct txx9_clock_event_device txx9_clock_event_device = {
.cd = {
.name = "TXx9",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_state_shutdown = txx9tmr_set_state_shutdown,
.set_state_periodic = txx9tmr_set_state_periodic,
.set_state_oneshot = txx9tmr_set_state_oneshot,
.tick_resume = txx9tmr_tick_resume,
.set_next_event = txx9tmr_set_next_event,
},
};
static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
{
struct txx9_clock_event_device *txx9_cd = dev_id;
struct clock_event_device *cd = &txx9_cd->cd;
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
__raw_writel(0, &tmrptr->tisr); /* ack interrupt */
cd->event_handler(cd);
return IRQ_HANDLED;
}
void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
unsigned int imbusclk)
{
struct clock_event_device *cd = &txx9_clock_event_device.cd;
struct txx9_tmr_reg __iomem *tmrptr;
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
txx9tmr_stop_and_clear(tmrptr);
__raw_writel(TIMER_CCD, &tmrptr->ccdr);
__raw_writel(0, &tmrptr->itmr);
txx9_clock_event_device.tmrptr = tmrptr;
clockevent_set_clock(cd, TIMER_CLK(imbusclk));
cd->max_delta_ns =
clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
cd->max_delta_ticks = 0xffffffff >> (32 - TXX9_TIMER_BITS);
cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
cd->min_delta_ticks = 0xf;
cd->irq = irq;
cd->cpumask = cpumask_of(0);
clockevents_register_device(cd);
if (request_irq(irq, txx9tmr_interrupt, IRQF_PERCPU | IRQF_TIMER,
"txx9tmr", &txx9_clock_event_device))
pr_err("Failed to request irq %d (txx9tmr)\n", irq);
printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
baseaddr, irq);
}
void __init txx9_tmr_init(unsigned long baseaddr)
{
struct txx9_tmr_reg __iomem *tmrptr;
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
/* Start once to make CounterResetEnable effective */
__raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr);
/* Stop and reset the counter */
__raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr);
__raw_writel(0, &tmrptr->tisr);
__raw_writel(0xffffffff, &tmrptr->cpra);
__raw_writel(0, &tmrptr->itmr);
__raw_writel(0, &tmrptr->ccdr);
__raw_writel(0, &tmrptr->pgmr);
iounmap(tmrptr);
}
| linux-master | arch/mips/kernel/cevt-txx9.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
* Copyright (C) 2005, 2006 by Ralf Baechle ([email protected])
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
unsigned long status;
/* New thread loses kernel privileges. */
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
status |= KU_USER;
regs->cp0_status = status;
lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
atomic_set(¤t->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
void exit_thread(struct task_struct *tsk)
{
/*
* User threads may have allocated a delay slot emulation frame.
* If so, clean up that allocation.
*/
if (!(current->flags & PF_KTHREAD))
dsemul_thread_cleanup(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/*
* Save any process state which is live in hardware registers to the
* parent context prior to duplication. This prevents the new child
* state becoming stale if the parent is preempted before copy_thread()
* gets a chance to save the parent's live hardware registers to the
* child context.
*/
preempt_disable();
if (is_msa_enabled())
save_msa(current);
else if (is_fpu_owner())
_save_fp(current);
save_dsp(current);
preempt_enable();
*dst = *src;
return 0;
}
/*
* Copy architecture-specific thread state
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
if (unlikely(args->fn)) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.reg16 = (unsigned long)args->fn;
p->thread.reg17 = (unsigned long)args->fn_arg;
p->thread.reg29 = childksp;
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
((status & (ST0_KUC | ST0_IEC)) << 2);
#else
status |= ST0_EXL;
#endif
childregs->cp0_status = status;
return 0;
}
/* user thread */
*childregs = *regs;
childregs->regs[7] = 0; /* Clear error flag */
childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
/*
* New tasks lose permission to use the fpu. This accelerates context
* switching for most programs since they don't use the fpu.
*/
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDMSA);
clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
#ifdef CONFIG_MIPS_MT_FPAFF
clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
#ifdef CONFIG_MIPS_FP_SUPPORT
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
if (clone_flags & CLONE_SETTLS)
ti->tp_value = tls;
return 0;
}
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
struct mips_frame_info {
void *func;
unsigned long func_size;
int frame_size;
int pc_offset;
};
#define J_TARGET(pc,target) \
(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
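/*
 * Worked example (illustrative): for a J instruction at pc 0x80123458 with
 * a 26-bit target field of 0x48d16, J_TARGET() yields
 * (0x80123458 & 0xf0000000) | (0x48d16 << 2) == 0x80000000 | 0x123458
 * == 0x80123458, i.e. the jump stays within the current 256 MB segment.
 */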
static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* jr16 ra
* jr ra
*/
if (mm_insn_16bit(ip->word >> 16)) {
if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
ip->mm16_r5_format.rt == mm_jr16_op &&
ip->mm16_r5_format.imm == 31)
return 1;
return 0;
}
if (ip->r_format.opcode == mm_pool32a_op &&
ip->r_format.func == mm_pool32axf_op &&
((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
ip->r_format.rt == 31)
return 1;
return 0;
#else
if (ip->r_format.opcode == spec_op &&
ip->r_format.func == jr_op &&
ip->r_format.rs == 31)
return 1;
return 0;
#endif
}
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* swsp ra,offset
* swm16 reglist,offset(sp)
* swm32 reglist,offset(sp)
* sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
*
* microMIPS is way more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
switch (ip->mm16_r5_format.opcode) {
case mm_swsp16_op:
if (ip->mm16_r5_format.rt != 31)
return 0;
*poff = ip->mm16_r5_format.imm;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
case mm_pool16c_op:
switch (ip->mm16_m_format.func) {
case mm_swm16_op:
*poff = ip->mm16_m_format.imm;
*poff += 1 + ip->mm16_m_format.rlist;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
default:
return 0;
}
default:
return 0;
}
}
switch (ip->i_format.opcode) {
case mm_sw32_op:
if (ip->i_format.rs != 29)
return 0;
if (ip->i_format.rt != 31)
return 0;
*poff = ip->i_format.simmediate / sizeof(ulong);
return 1;
case mm_pool32b_op:
switch (ip->mm_m_format.func) {
case mm_swm32_func:
if (ip->mm_m_format.rd < 0x10)
return 0;
if (ip->mm_m_format.base != 29)
return 0;
*poff = ip->mm_m_format.simmediate;
*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
*poff /= sizeof(ulong);
return 1;
default:
return 0;
}
default:
return 0;
}
#else
/* sw / sd $ra, offset($sp) */
if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
ip->i_format.rs == 29 && ip->i_format.rt == 31) {
*poff = ip->i_format.simmediate / sizeof(ulong);
return 1;
}
#ifdef CONFIG_CPU_LOONGSON64
if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
(ip->loongson3_lswc2_format.ls == 1) &&
(ip->loongson3_lswc2_format.fr == 0) &&
(ip->loongson3_lswc2_format.base == 29)) {
if (ip->loongson3_lswc2_format.rt == 31) {
*poff = ip->loongson3_lswc2_format.offset << 1;
return 1;
}
if (ip->loongson3_lswc2_format.rq == 31) {
*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
return 1;
}
}
#endif
return 0;
#endif
}
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
	 * jr16,jrc,jalr16,jalrs16
* jal
* jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
* jraddiusp - NOT SUPPORTED
*
* microMIPS is kind of more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
(ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
return 1;
return 0;
}
if (ip->j_format.opcode == mm_j32_op)
return 1;
if (ip->j_format.opcode == mm_jal32_op)
return 1;
if (ip->r_format.opcode != mm_pool32a_op ||
ip->r_format.func != mm_pool32axf_op)
return 0;
return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
if (ip->j_format.opcode == j_op)
return 1;
if (ip->j_format.opcode == jal_op)
return 1;
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
unsigned short tmp;
/*
* addiusp -imm
* addius5 sp,-imm
* addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
*
* microMIPS is not more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
ip->mm16_r3_format.simmediate & mm_addiusp_func) {
tmp = ip->mm_b0_format.simmediate >> 1;
tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
tmp ^= 0x100;
*frame_size = -(signed short)(tmp << 2);
return 1;
}
if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
ip->mm16_r5_format.rt == 29) {
tmp = ip->mm16_r5_format.imm >> 1;
*frame_size = -(signed short)(tmp & 0xf);
return 1;
}
return 0;
}
if (ip->mm_i_format.opcode == mm_addiu32_op &&
ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
*frame_size = -ip->i_format.simmediate;
return 1;
}
#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
return 0;
if (ip->i_format.opcode == addiu_op ||
ip->i_format.opcode == daddiu_op) {
*frame_size = -ip->i_format.simmediate;
return 1;
}
#endif
return 0;
}
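/*
 * Illustrative example (classic MIPS encoding, not microMIPS): the prologue
 * instruction "addiu sp, sp, -32" encodes as 0x27bdffe0; rs == rt == 29 and
 * simmediate == -32, so is_sp_move_ins() reports *frame_size == 32.
 */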
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
unsigned int last_insn_size = 0;
bool saw_jump = false;
info->pc_offset = -1;
info->frame_size = 0;
ip = (void *)msk_isa16_mode((ulong)info->func);
if (!ip)
goto err;
ip_end = (void *)ip + (info->func_size ? info->func_size : 512);
while (ip < ip_end) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.word = ip->halfword[0] << 16;
last_insn_size = 2;
} else if (is_mmips) {
insn.word = ip->halfword[0] << 16 | ip->halfword[1];
last_insn_size = 4;
} else {
insn.word = ip->word;
last_insn_size = 4;
}
if (is_jr_ra_ins(ip)) {
break;
} else if (!info->frame_size) {
is_sp_move_ins(&insn, &info->frame_size);
continue;
} else if (!saw_jump && is_jump_ins(ip)) {
/*
* If we see a jump instruction, we are finished
* with the frame save.
*
* Some functions can have a shortcut return at
* the beginning of the function, so don't start
* looking for jump instruction until we see the
* frame setup.
*
* The RA save instruction can get put into the
* delay slot of the jump instruction, so look
* at the next instruction, too.
*/
saw_jump = true;
continue;
}
if (info->pc_offset == -1 &&
is_ra_save_ins(&insn, &info->pc_offset))
break;
if (saw_jump)
break;
}
if (info->frame_size && info->pc_offset >= 0) /* nested */
return 0;
if (info->pc_offset < 0) /* leaf */
return 1;
/* prologue seems bogus... */
err:
return -1;
}
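/*
 * Illustrative prologue (32-bit kernel, classic encodings) showing what
 * get_frame_info() extracts:
 *
 *	addiu	sp, sp, -32	# is_sp_move_ins():  frame_size = 32
 *	sw	ra, 28(sp)	# is_ra_save_ins():  pc_offset = 28/4 = 7
 *	...
 *	jr	ra		# is_jr_ra_ins(): stop scanning
 *
 * A function that never saves $ra is reported as a leaf (return value 1).
 */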
static struct mips_frame_info schedule_mfi __read_mostly;
#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
union mips_instruction *ip = (void *)schedule;
int max_insns = 8;
int i;
for (i = 0; i < max_insns; i++, ip++) {
if (ip->j_format.opcode == j_op)
return J_TARGET(ip, ip->j_format.target);
}
return 0;
}
#endif
static int __init frame_info_init(void)
{
unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
unsigned long ofs;
#endif
unsigned long addr;
addr = get___schedule_addr();
if (!addr)
addr = (unsigned long)schedule;
#ifdef CONFIG_KALLSYMS
kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
schedule_mfi.func = (void *)addr;
schedule_mfi.func_size = size;
get_frame_info(&schedule_mfi);
/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and __get_wchan() are not reliable.
*/
if (schedule_mfi.pc_offset < 0)
printk("Can't analyze schedule() prologue at %p\n", schedule);
return 0;
}
arch_initcall(frame_info_init);
/*
* Return saved PC of a blocked thread.
*/
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
/* New born processes are a special case */
if (t->reg31 == (unsigned long) ret_from_fork)
return t->reg31;
if (schedule_mfi.pc_offset < 0)
return 0;
return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long *sp,
unsigned long pc,
unsigned long *ra)
{
unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
struct pt_regs *regs;
int leaf;
if (!stack_page)
return 0;
/*
* IRQ stacks start at IRQ_STACK_START
* task stacks at THREAD_SIZE - 32
*/
low = stack_page;
if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
high = stack_page + IRQ_STACK_START;
irq_stack_high = high;
} else {
high = stack_page + THREAD_SIZE - 32;
irq_stack_high = 0;
}
/*
* If we reached the top of the interrupt stack, start unwinding
* the interrupted task stack.
*/
if (unlikely(*sp == irq_stack_high)) {
unsigned long task_sp = *(unsigned long *)*sp;
/*
* Check that the pointer saved in the IRQ stack head points to
* something within the stack of the current task
*/
if (!object_is_on_stack((void *)task_sp))
return 0;
/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
*/
regs = (struct pt_regs *)task_sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
return 0;
}
if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
return 0;
/*
* Return ra if an exception occurred at the first instruction
*/
if (unlikely(ofs == 0)) {
pc = *ra;
*ra = 0;
return pc;
}
info.func = (void *)(pc - ofs);
info.func_size = ofs; /* analyze from start to ofs */
leaf = get_frame_info(&info);
if (leaf < 0)
return 0;
if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)
/*
		 * In some extreme cases, get_frame_info() can
		 * wrongly consider a nested function to be a leaf
		 * one. In those cases, avoid always returning the
		 * same value.
*/
pc = pc != *ra ? *ra : 0;
else
pc = ((unsigned long *)(*sp))[info.pc_offset];
*sp += info.frame_size;
*ra = 0;
return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra)
{
unsigned long stack_page = 0;
int cpu;
for_each_possible_cpu(cpu) {
if (on_irq_stack(cpu, *sp)) {
stack_page = (unsigned long)irq_stack[cpu];
break;
}
}
if (!stack_page)
stack_page = (unsigned long)task_stack_page(task);
return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
/*
* __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
*/
unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
unsigned long sp;
unsigned long ra = 0;
#endif
if (!task_stack_page(task))
goto out;
pc = thread_saved_pc(task);
#ifdef CONFIG_KALLSYMS
sp = task->thread.reg29 + schedule_mfi.frame_size;
while (in_sched_functions(pc))
pc = unwind_stack(task, &sp, pc, &ra);
#endif
out:
return pc;
}
unsigned long mips_stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
/* One page for branch delay slot "emulation" */
top -= PAGE_SIZE;
}
/* Space for the VDSO, data page & GIC user page */
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
top -= PAGE_SIZE;
top -= mips_gic_present() ? PAGE_SIZE : 0;
/* Space for cache colour alignment */
if (cpu_has_dc_aliases)
top -= shm_align_mask + 1;
/* Space to randomize the VDSO base */
if (current->flags & PF_RANDOMIZE)
top -= VDSO_RANDOMIZE_SIZE;
return top;
}
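/*
 * Illustrative tally (assuming 4 KiB pages, a one-page VDSO image, no GIC,
 * no cache aliasing and no randomization): with CONFIG_MIPS_FP_SUPPORT the
 * result is TASK_SIZE minus one page for delay-slot emulation, one for the
 * VDSO image and one for its data page, i.e. TASK_SIZE - 3 * PAGE_SIZE.
 */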
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the 64-bit
 * ABI.
*/
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
return sp & ALMASK;
}
static struct cpumask backtrace_csd_busy;
static void handle_backtrace(void *info)
{
nmi_cpu_backtrace(get_irq_regs());
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
CSD_INIT(handle_backtrace, NULL);
static void raise_backtrace(cpumask_t *mask)
{
call_single_data_t *csd;
int cpu;
for_each_cpu(cpu, mask) {
/*
* If we previously sent an IPI to the target CPU & it hasn't
* cleared its bit in the busy cpumask then it didn't handle
* our previous IPI & it's not safe for us to reuse the
* call_single_data_t.
*/
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
cpu);
continue;
}
csd = &per_cpu(backtrace_csd, cpu);
smp_call_function_single_async(cpu, csd);
}
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}
int mips_get_process_fp_mode(struct task_struct *task)
{
int value = 0;
if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
value |= PR_FP_MODE_FR;
if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
value |= PR_FP_MODE_FRE;
return value;
}
static long prepare_for_fp_mode_switch(void *unused)
{
/*
* This is icky, but we use this to simply ensure that all CPUs have
* context switched, regardless of whether they were previously running
* kernel or user code. This ensures that no CPU that a mode-switching
* program may execute on keeps its FPU enabled (& in the old mode)
* throughout the mode switch.
*/
return 0;
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
struct task_struct *t;
struct cpumask process_cpus;
int cpu;
/* If nothing to change, return right away, successfully. */
if (value == mips_get_process_fp_mode(task))
return 0;
/* Only accept a mode change if 64-bit FP enabled for o32. */
if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return -EOPNOTSUPP;
/* And only for o32 tasks. */
if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
return -EOPNOTSUPP;
/* Check the value is valid */
if (value & ~known_bits)
return -EOPNOTSUPP;
/* Setting FRE without FR is not supported. */
if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
return -EOPNOTSUPP;
/* Avoid inadvertently triggering emulation */
if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
!(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
return -EOPNOTSUPP;
if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
return -EOPNOTSUPP;
/* FR = 0 not supported in MIPS R6 */
if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
/* Indicate the new FP mode in each thread */
for_each_thread(task, t) {
/* Update desired FP register width */
if (value & PR_FP_MODE_FR) {
clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
} else {
set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
}
/* Update desired FP single layout */
if (value & PR_FP_MODE_FRE)
set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
else
clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
}
/*
* We need to ensure that all threads in the process have switched mode
* before returning, in order to allow userland to not worry about
* races. We can do this by forcing all CPUs that any thread in the
* process may be running on to schedule something else - in this case
* prepare_for_fp_mode_switch().
*
* We begin by generating a mask of all CPUs that any thread in the
* process may be running on.
*/
cpumask_clear(&process_cpus);
for_each_thread(task, t)
cpumask_set_cpu(task_cpu(t), &process_cpus);
/*
* Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
*
* The CPUs may have rescheduled already since we switched mode or
* generated the cpumask, but that doesn't matter. If the task in this
* process is scheduled out then our scheduling
* prepare_for_fp_mode_switch() will simply be redundant. If it's
* scheduled in then it will already have picked up the new FP mode
* whilst doing so.
*/
cpus_read_lock();
for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
cpus_read_unlock();
return 0;
}
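/*
 * Userland reaches the two helpers above through prctl(2).  An illustrative
 * (untested) call sequence requesting 64-bit FP registers with the hybrid
 * single-precision layout:
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE, 0, 0, 0))
 *		perror("PR_SET_FP_MODE");
 *	printf("FP mode now %d\n", (int)prctl(PR_GET_FP_MODE, 0, 0, 0, 0));
 */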
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
unsigned int i;
for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
/* k0/k1 are copied as zero. */
if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
uregs[i] = 0;
else
uregs[i] = regs->regs[i - MIPS32_EF_R0];
}
uregs[MIPS32_EF_LO] = regs->lo;
uregs[MIPS32_EF_HI] = regs->hi;
uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
unsigned int i;
for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
/* k0/k1 are copied as zero. */
if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
uregs[i] = 0;
else
uregs[i] = regs->regs[i - MIPS64_EF_R0];
}
uregs[MIPS64_EF_LO] = regs->lo;
uregs[MIPS64_EF_HI] = regs->hi;
uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */
| linux-master | arch/mips/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MIPS specific sysrq operations.
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/workqueue.h>
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/tlbdebug.h>
/*
* Dump TLB entries on all CPUs.
*/
static DEFINE_SPINLOCK(show_lock);
static void sysrq_tlbdump_single(void *dummy)
{
unsigned long flags;
spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
dump_tlb_regs();
pr_info("\n");
dump_tlb_all();
pr_info("\n");
spin_unlock_irqrestore(&show_lock, flags);
}
#ifdef CONFIG_SMP
static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
{
smp_call_function(sysrq_tlbdump_single, NULL, 0);
}
static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
#endif
static void sysrq_handle_tlbdump(u8 key)
{
sysrq_tlbdump_single(NULL);
#ifdef CONFIG_SMP
schedule_work(&sysrq_tlbdump);
#endif
}
static const struct sysrq_key_op sysrq_tlbdump_op = {
.handler = sysrq_handle_tlbdump,
.help_msg = "show-tlbs(x)",
.action_msg = "Show TLB entries",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
static int __init mips_sysrq_init(void)
{
return register_sysrq_key('x', &sysrq_tlbdump_op);
}
arch_initcall(mips_sysrq_init);
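/*
 * Usage sketch: once sysrq is enabled, "echo x > /proc/sysrq-trigger" (or
 * the SysRq-x key chord) runs sysrq_handle_tlbdump(), which dumps the local
 * CPU's TLB immediately and defers the remaining CPUs to a workqueue, since
 * smp_call_function() must not be called from interrupt context.
 */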
| linux-master | arch/mips/kernel/sysrq.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2014 Imagination Technologies Ltd.
* Author: Leonid Yegoshin <[email protected]>
* Author: Markos Chandras <[email protected]>
*
* MIPS R2 user space instruction emulator for MIPS R6
*
*/
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/local.h>
#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#ifdef CONFIG_64BIT
#define ADDIU "daddiu "
#define INS "dins "
#define EXT "dext "
#else
#define ADDIU "addiu "
#define INS "ins "
#define EXT "ext "
#endif /* CONFIG_64BIT */
#define SB "sb "
#define LB "lb "
#define LL "ll "
#define SC "sc "
#ifdef CONFIG_DEBUG_FS
static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
#endif
extern const unsigned int fpucondbit[8];
#define MIPS_R2_EMUL_TOTAL_PASS 10
int mipsr2_emulation = 0;
static int __init mipsr2emu_enable(char *s)
{
mipsr2_emulation = 1;
	pr_info("MIPS R2-to-R6 Emulator Enabled!\n");
return 1;
}
__setup("mipsr2emu", mipsr2emu_enable);
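/*
 * Usage sketch: the emulator is off by default; booting with "mipsr2emu" on
 * the kernel command line flips mipsr2_emulation to 1 so that removed R2
 * instructions trap into mipsr2_decoder() instead of raising SIGILL.
 */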
/**
 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in a delay slot
 * directly, for performance, instead of going through the traditional (and
 * rather slow) stack trampoline.
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 if the instruction was emulated, -SIGFPE for an FPU instruction
 * in the delay slot, or SIGILL if there is no fast path for @ir.
*/
static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
{
switch (MIPSInst_OPCODE(ir)) {
case addiu_op:
if (MIPSInst_RT(ir))
regs->regs[MIPSInst_RT(ir)] =
(s32)regs->regs[MIPSInst_RS(ir)] +
(s32)MIPSInst_SIMM(ir);
return 0;
case daddiu_op:
if (IS_ENABLED(CONFIG_32BIT))
break;
if (MIPSInst_RT(ir))
regs->regs[MIPSInst_RT(ir)] =
(s64)regs->regs[MIPSInst_RS(ir)] +
(s64)MIPSInst_SIMM(ir);
return 0;
case lwc1_op:
case swc1_op:
case cop1_op:
case cop1x_op:
/* FPU instructions in delay slot */
return -SIGFPE;
case spec_op:
switch (MIPSInst_FUNC(ir)) {
case or_op:
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
regs->regs[MIPSInst_RS(ir)] |
regs->regs[MIPSInst_RT(ir)];
return 0;
case sll_op:
if (MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
MIPSInst_FD(ir));
return 0;
case srl_op:
if (MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
MIPSInst_FD(ir));
return 0;
case addu_op:
if (MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(s32)((u32)regs->regs[MIPSInst_RS(ir)] +
(u32)regs->regs[MIPSInst_RT(ir)]);
return 0;
case subu_op:
if (MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(s32)((u32)regs->regs[MIPSInst_RS(ir)] -
(u32)regs->regs[MIPSInst_RT(ir)]);
return 0;
case dsll_op:
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
MIPSInst_FD(ir));
return 0;
case dsrl_op:
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
MIPSInst_FD(ir));
return 0;
case daddu_op:
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(u64)regs->regs[MIPSInst_RS(ir)] +
(u64)regs->regs[MIPSInst_RT(ir)];
return 0;
case dsubu_op:
if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] =
(s64)((u64)regs->regs[MIPSInst_RS(ir)] -
(u64)regs->regs[MIPSInst_RT(ir)]);
return 0;
}
break;
default:
pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
ir, MIPSInst_OPCODE(ir));
}
return SIGILL;
}
/**
* movf_func - Emulate a MOVF instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int movf_func(struct pt_regs *regs, u32 ir)
{
u32 csr;
u32 cond;
csr = current->thread.fpu.fcr31;
cond = fpucondbit[MIPSInst_RT(ir) >> 2];
if (((csr & cond) == 0) && MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
MIPS_R2_STATS(movs);
return 0;
}
/**
* movt_func - Emulate a MOVT instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int movt_func(struct pt_regs *regs, u32 ir)
{
u32 csr;
u32 cond;
csr = current->thread.fpu.fcr31;
cond = fpucondbit[MIPSInst_RT(ir) >> 2];
if (((csr & cond) != 0) && MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
MIPS_R2_STATS(movs);
return 0;
}
/**
* jr_func - Emulate a JR instruction.
* @pt_regs: Process register set
* @ir: Instruction
*
* Returns SIGILL if JR was in delay slot, SIGEMT if we
* can't compute the EPC, SIGSEGV if we can't access the
* userland instruction or 0 on success.
*/
static int jr_func(struct pt_regs *regs, u32 ir)
{
int err;
unsigned long cepc, epc, nepc;
u32 nir;
if (delay_slot(regs))
return SIGILL;
/* EPC after the RI/JR instruction */
nepc = regs->cp0_epc;
/* Roll back to the reserved R2 JR instruction */
regs->cp0_epc -= 4;
epc = regs->cp0_epc;
err = __compute_return_epc(regs);
if (err < 0)
return SIGEMT;
/* Computed EPC */
cepc = regs->cp0_epc;
/* Get DS instruction */
err = __get_user(nir, (u32 __user *)nepc);
if (err)
return SIGSEGV;
MIPS_R2BR_STATS(jrs);
	/* If nir == 0 (NOP), then there is nothing else to do */
if (nir) {
/*
		 * A negative err means an FPU instruction in the BD slot,
		 * zero err means 'BD-slot emulation done'.
		 * For anything else we go back to trampoline emulation.
*/
err = mipsr6_emul(regs, nir);
if (err > 0) {
regs->cp0_epc = nepc;
err = mips_dsemul(regs, nir, epc, cepc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
}
}
return err;
}
/**
* movz_func - Emulate a MOVZ instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int movz_func(struct pt_regs *regs, u32 ir)
{
if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
MIPS_R2_STATS(movs);
return 0;
}
/**
 * movn_func - Emulate a MOVN instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int movn_func(struct pt_regs *regs, u32 ir)
{
if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
MIPS_R2_STATS(movs);
return 0;
}
/**
* mfhi_func - Emulate a MFHI instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int mfhi_func(struct pt_regs *regs, u32 ir)
{
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] = regs->hi;
MIPS_R2_STATS(hilo);
return 0;
}
/**
* mthi_func - Emulate a MTHI instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int mthi_func(struct pt_regs *regs, u32 ir)
{
regs->hi = regs->regs[MIPSInst_RS(ir)];
MIPS_R2_STATS(hilo);
return 0;
}
/**
* mflo_func - Emulate a MFLO instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int mflo_func(struct pt_regs *regs, u32 ir)
{
if (MIPSInst_RD(ir))
regs->regs[MIPSInst_RD(ir)] = regs->lo;
MIPS_R2_STATS(hilo);
return 0;
}
/**
* mtlo_func - Emulate a MTLO instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int mtlo_func(struct pt_regs *regs, u32 ir)
{
regs->lo = regs->regs[MIPSInst_RS(ir)];
MIPS_R2_STATS(hilo);
return 0;
}
/**
* mult_func - Emulate a MULT instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int mult_func(struct pt_regs *regs, u32 ir)
{
s64 res;
s32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = (s64)rt * (s64)rs;
rs = res;
regs->lo = (s64)rs;
rt = res >> 32;
res = (s64)rt;
regs->hi = res;
MIPS_R2_STATS(muls);
return 0;
}
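/*
 * Worked example (illustrative): for rt == 0x00010000 and rs == 0x00010000
 * the 64-bit product is 0x0000000100000000, so mult_func() leaves
 * regs->lo == 0 and regs->hi == 1, mirroring the architectural HI/LO split.
 */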
/**
* multu_func - Emulate a MULTU instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int multu_func(struct pt_regs *regs, u32 ir)
{
u64 res;
u32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = (u64)rt * (u64)rs;
rt = res;
regs->lo = (s64)(s32)rt;
regs->hi = (s64)(s32)(res >> 32);
MIPS_R2_STATS(muls);
return 0;
}
/**
* div_func - Emulate a DIV instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int div_func(struct pt_regs *regs, u32 ir)
{
s32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
regs->lo = (s64)(rs / rt);
regs->hi = (s64)(rs % rt);
MIPS_R2_STATS(divs);
return 0;
}
/**
* divu_func - Emulate a DIVU instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int divu_func(struct pt_regs *regs, u32 ir)
{
u32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
regs->lo = (s64)(rs / rt);
regs->hi = (s64)(rs % rt);
MIPS_R2_STATS(divs);
return 0;
}
/**
* dmult_func - Emulate a DMULT instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 on success or SIGILL for 32-bit kernels.
*/
static int dmult_func(struct pt_regs *regs, u32 ir)
{
s64 res;
s64 rt, rs;
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = rt * rs;
regs->lo = res;
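	/*
	 * A C multiply only yields the low 64 bits of the 128-bit product;
	 * since this emulator runs on R6 hardware, use R6's DMUH instruction
	 * to recover the high half for HI.
	 */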
__asm__ __volatile__(
"dmuh %0, %1, %2\t\n"
: "=r"(res)
: "r"(rt), "r"(rs));
regs->hi = res;
MIPS_R2_STATS(muls);
return 0;
}
/**
* dmultu_func - Emulate a DMULTU instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 on success or SIGILL for 32-bit kernels.
*/
static int dmultu_func(struct pt_regs *regs, u32 ir)
{
u64 res;
u64 rt, rs;
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = rt * rs;
regs->lo = res;
__asm__ __volatile__(
"dmuhu %0, %1, %2\t\n"
: "=r"(res)
: "r"(rt), "r"(rs));
regs->hi = res;
MIPS_R2_STATS(muls);
return 0;
}
/**
* ddiv_func - Emulate a DDIV instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 on success or SIGILL for 32-bit kernels.
*/
static int ddiv_func(struct pt_regs *regs, u32 ir)
{
s64 rt, rs;
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
regs->lo = rs / rt;
regs->hi = rs % rt;
MIPS_R2_STATS(divs);
return 0;
}
/**
* ddivu_func - Emulate a DDIVU instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 on success or SIGILL for 32-bit kernels.
*/
static int ddivu_func(struct pt_regs *regs, u32 ir)
{
u64 rt, rs;
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
regs->lo = rs / rt;
regs->hi = rs % rt;
MIPS_R2_STATS(divs);
return 0;
}
/* R6 removed instructions for the SPECIAL opcode */
static const struct r2_decoder_table spec_op_table[] = {
{ 0xfc1ff83f, 0x00000008, jr_func },
{ 0xfc00ffff, 0x00000018, mult_func },
{ 0xfc00ffff, 0x00000019, multu_func },
{ 0xfc00ffff, 0x0000001c, dmult_func },
{ 0xfc00ffff, 0x0000001d, dmultu_func },
{ 0xffff07ff, 0x00000010, mfhi_func },
{ 0xfc1fffff, 0x00000011, mthi_func },
{ 0xffff07ff, 0x00000012, mflo_func },
{ 0xfc1fffff, 0x00000013, mtlo_func },
{ 0xfc0307ff, 0x00000001, movf_func },
{ 0xfc0307ff, 0x00010001, movt_func },
{ 0xfc0007ff, 0x0000000a, movz_func },
{ 0xfc0007ff, 0x0000000b, movn_func },
{ 0xfc00ffff, 0x0000001a, div_func },
{ 0xfc00ffff, 0x0000001b, divu_func },
{ 0xfc00ffff, 0x0000001e, ddiv_func },
{ 0xfc00ffff, 0x0000001f, ddivu_func },
{}
};
/**
* madd_func - Emulate a MADD instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int madd_func(struct pt_regs *regs, u32 ir)
{
s64 res;
s32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = (s64)rt * (s64)rs;
rt = regs->hi;
rs = regs->lo;
res += ((((s64)rt) << 32) | (u32)rs);
rt = res;
regs->lo = (s64)rt;
rs = res >> 32;
regs->hi = (s64)rs;
MIPS_R2_STATS(dsps);
return 0;
}
/**
* maddu_func - Emulate a MADDU instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int maddu_func(struct pt_regs *regs, u32 ir)
{
u64 res;
u32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = (u64)rt * (u64)rs;
rt = regs->hi;
rs = regs->lo;
res += ((((s64)rt) << 32) | (u32)rs);
rt = res;
regs->lo = (s64)(s32)rt;
rs = res >> 32;
regs->hi = (s64)(s32)rs;
MIPS_R2_STATS(dsps);
return 0;
}
/**
* msub_func - Emulate a MSUB instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int msub_func(struct pt_regs *regs, u32 ir)
{
s64 res;
s32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = (s64)rt * (s64)rs;
rt = regs->hi;
rs = regs->lo;
res = ((((s64)rt) << 32) | (u32)rs) - res;
rt = res;
regs->lo = (s64)rt;
rs = res >> 32;
regs->hi = (s64)rs;
MIPS_R2_STATS(dsps);
return 0;
}
/**
* msubu_func - Emulate a MSUBU instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int msubu_func(struct pt_regs *regs, u32 ir)
{
u64 res;
u32 rt, rs;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = (u64)rt * (u64)rs;
rt = regs->hi;
rs = regs->lo;
res = ((((s64)rt) << 32) | (u32)rs) - res;
rt = res;
regs->lo = (s64)(s32)rt;
rs = res >> 32;
regs->hi = (s64)(s32)rs;
MIPS_R2_STATS(dsps);
return 0;
}
/**
* mul_func - Emulate a MUL instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int mul_func(struct pt_regs *regs, u32 ir)
{
s64 res;
s32 rt, rs;
if (!MIPSInst_RD(ir))
return 0;
rt = regs->regs[MIPSInst_RT(ir)];
rs = regs->regs[MIPSInst_RS(ir)];
res = (s64)rt * (s64)rs;
rs = res;
regs->regs[MIPSInst_RD(ir)] = (s64)rs;
MIPS_R2_STATS(muls);
return 0;
}
/**
* clz_func - Emulate a CLZ instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int clz_func(struct pt_regs *regs, u32 ir)
{
u32 res;
u32 rs;
if (!MIPSInst_RD(ir))
return 0;
rs = regs->regs[MIPSInst_RS(ir)];
__asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
regs->regs[MIPSInst_RD(ir)] = res;
MIPS_R2_STATS(bops);
return 0;
}
/**
* clo_func - Emulate a CLO instruction
* @regs: Process register set
* @ir: Instruction
*
* Returns 0 since it always succeeds.
*/
static int clo_func(struct pt_regs *regs, u32 ir)
{
u32 res;
u32 rs;
if (!MIPSInst_RD(ir))
return 0;
rs = regs->regs[MIPSInst_RS(ir)];
__asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
regs->regs[MIPSInst_RD(ir)] = res;
MIPS_R2_STATS(bops);
return 0;
}
/**
* dclz_func - Emulate a DCLZ instruction
* @regs: Process register set
* @ir: Instruction
*
 * Returns 0 on success or SIGILL for 32-bit kernels.
*/
static int dclz_func(struct pt_regs *regs, u32 ir)
{
u64 res;
u64 rs;
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
return 0;
rs = regs->regs[MIPSInst_RS(ir)];
__asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
regs->regs[MIPSInst_RD(ir)] = res;
MIPS_R2_STATS(bops);
return 0;
}
/**
* dclo_func - Emulate a DCLO instruction
* @regs: Process register set
* @ir: Instruction
*
 * Returns 0 on success or SIGILL for 32-bit kernels.
*/
static int dclo_func(struct pt_regs *regs, u32 ir)
{
u64 res;
u64 rs;
if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
return 0;
rs = regs->regs[MIPSInst_RS(ir)];
__asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
regs->regs[MIPSInst_RD(ir)] = res;
MIPS_R2_STATS(bops);
return 0;
}
/* R6 removed instructions for the SPECIAL2 opcode */
static const struct r2_decoder_table spec2_op_table[] = {
{ 0xfc00ffff, 0x70000000, madd_func },
{ 0xfc00ffff, 0x70000001, maddu_func },
{ 0xfc0007ff, 0x70000002, mul_func },
{ 0xfc00ffff, 0x70000004, msub_func },
{ 0xfc00ffff, 0x70000005, msubu_func },
{ 0xfc0007ff, 0x70000020, clz_func },
{ 0xfc0007ff, 0x70000021, clo_func },
{ 0xfc0007ff, 0x70000024, dclz_func },
{ 0xfc0007ff, 0x70000025, dclo_func },
{ }
};
static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
const struct r2_decoder_table *table)
{
const struct r2_decoder_table *p;
int err;
for (p = table; p->func; p++) {
if ((inst & p->mask) == p->code) {
err = (p->func)(regs, inst);
return err;
}
}
return SIGILL;
}
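/*
 * Worked example (illustrative): "mult $3, $4" encodes as 0x00640018.
 * Masked with spec_op_table's 0xfc00ffff this leaves 0x00000018, which
 * matches the mult_func entry, so that handler is invoked.
 */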
/**
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
* @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
int err = 0;
unsigned long vaddr;
u32 nir;
unsigned long cpc, epc, nepc, r31, res, rs, rt;
void __user *fault_addr = NULL;
int pass = 0;
repeat:
r31 = regs->regs[31];
epc = regs->cp0_epc;
err = compute_return_epc(regs);
if (err < 0) {
BUG();
return SIGEMT;
}
pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
inst, epc, pass);
switch (MIPSInst_OPCODE(inst)) {
case spec_op:
err = mipsr2_find_op_func(regs, inst, spec_op_table);
if (err < 0) {
/* FPU instruction under JR */
regs->cp0_cause |= CAUSEF_BD;
goto fpu_emul;
}
break;
case spec2_op:
err = mipsr2_find_op_func(regs, inst, spec2_op_table);
break;
case bcond_op:
rt = MIPSInst_RT(inst);
rs = MIPSInst_RS(inst);
switch (rt) {
case tgei_op:
if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, 0, "TGEI");
MIPS_R2_STATS(traps);
break;
case tgeiu_op:
if (regs->regs[rs] >= MIPSInst_UIMM(inst))
do_trap_or_bp(regs, 0, 0, "TGEIU");
MIPS_R2_STATS(traps);
break;
case tlti_op:
if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, 0, "TLTI");
MIPS_R2_STATS(traps);
break;
case tltiu_op:
if (regs->regs[rs] < MIPSInst_UIMM(inst))
do_trap_or_bp(regs, 0, 0, "TLTIU");
MIPS_R2_STATS(traps);
break;
case teqi_op:
if (regs->regs[rs] == MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, 0, "TEQI");
MIPS_R2_STATS(traps);
break;
case tnei_op:
if (regs->regs[rs] != MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, 0, "TNEI");
MIPS_R2_STATS(traps);
break;
case bltzl_op:
case bgezl_op:
case bltzall_op:
case bgezall_op:
if (delay_slot(regs)) {
err = SIGILL;
break;
}
regs->regs[31] = r31;
regs->cp0_epc = epc;
err = __compute_return_epc(regs);
if (err < 0)
return SIGEMT;
if (err != BRANCH_LIKELY_TAKEN)
break;
cpc = regs->cp0_epc;
nepc = epc + 4;
err = __get_user(nir, (u32 __user *)nepc);
if (err) {
err = SIGSEGV;
break;
}
/*
* This will probably be optimized away when
* CONFIG_DEBUG_FS is not enabled
*/
switch (rt) {
case bltzl_op:
MIPS_R2BR_STATS(bltzl);
break;
case bgezl_op:
MIPS_R2BR_STATS(bgezl);
break;
case bltzall_op:
MIPS_R2BR_STATS(bltzall);
break;
case bgezall_op:
MIPS_R2BR_STATS(bgezall);
break;
}
switch (MIPSInst_OPCODE(nir)) {
case cop1_op:
case cop1x_op:
case lwc1_op:
case swc1_op:
regs->cp0_cause |= CAUSEF_BD;
goto fpu_emul;
}
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
}
}
break;
case bltzal_op:
case bgezal_op:
if (delay_slot(regs)) {
err = SIGILL;
break;
}
regs->regs[31] = r31;
regs->cp0_epc = epc;
err = __compute_return_epc(regs);
if (err < 0)
return SIGEMT;
cpc = regs->cp0_epc;
nepc = epc + 4;
err = __get_user(nir, (u32 __user *)nepc);
if (err) {
err = SIGSEGV;
break;
}
/*
* This will probably be optimized away when
* CONFIG_DEBUG_FS is not enabled
*/
switch (rt) {
case bltzal_op:
MIPS_R2BR_STATS(bltzal);
break;
case bgezal_op:
MIPS_R2BR_STATS(bgezal);
break;
}
switch (MIPSInst_OPCODE(nir)) {
case cop1_op:
case cop1x_op:
case lwc1_op:
case swc1_op:
regs->cp0_cause |= CAUSEF_BD;
goto fpu_emul;
}
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
}
}
break;
default:
regs->regs[31] = r31;
regs->cp0_epc = epc;
err = SIGILL;
break;
}
break;
case blezl_op:
case bgtzl_op:
/*
		 * For BLEZL and BGTZL, the rt field must be 0. If it is
		 * not, this may be an encoding of a MIPS R6 instruction,
		 * so return to CPU execution in that case.
*/
if (MIPSInst_RT(inst)) {
err = SIGILL;
break;
}
fallthrough;
case beql_op:
case bnel_op:
if (delay_slot(regs)) {
err = SIGILL;
break;
}
regs->regs[31] = r31;
regs->cp0_epc = epc;
err = __compute_return_epc(regs);
if (err < 0)
return SIGEMT;
if (err != BRANCH_LIKELY_TAKEN)
break;
cpc = regs->cp0_epc;
nepc = epc + 4;
err = __get_user(nir, (u32 __user *)nepc);
if (err) {
err = SIGSEGV;
break;
}
/*
* This will probably be optimized away when
* CONFIG_DEBUG_FS is not enabled
*/
switch (MIPSInst_OPCODE(inst)) {
case beql_op:
MIPS_R2BR_STATS(beql);
break;
case bnel_op:
MIPS_R2BR_STATS(bnel);
break;
case blezl_op:
MIPS_R2BR_STATS(blezl);
break;
case bgtzl_op:
MIPS_R2BR_STATS(bgtzl);
break;
}
switch (MIPSInst_OPCODE(nir)) {
case cop1_op:
case cop1x_op:
case lwc1_op:
case swc1_op:
regs->cp0_cause |= CAUSEF_BD;
goto fpu_emul;
}
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
}
}
break;
case lwc1_op:
case swc1_op:
case cop1_op:
case cop1x_op:
fpu_emul:
regs->regs[31] = r31;
regs->cp0_epc = epc;
err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0,
&fault_addr);
/*
* We can't allow the emulated instruction to leave any
* enabled Cause bits set in $fcr31.
*/
*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~res;
/*
		 * This is a tricky issue - lose_fpu() uses LL/SC atomics
		 * if the FPU is owned, which effectively cancels a user-level
		 * LL/SC pair. So it could be argued that FPU ownership should
		 * not be restored here. But a sequence of several FPU
		 * instructions is far more common than LL-FPU-SC, so prefer
		 * to loop here until the next scheduler cycle cancels FPU
		 * ownership.
*/
own_fpu(1); /* Restore FPU state. */
if (err)
current->thread.cp0_baduaddr = (unsigned long)fault_addr;
MIPS_R2_STATS(fpus);
break;
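	/*
	 * The LWL/LWR, SWL/SWR and 64-bit LDL/LDR, SDL/SDR cases below
	 * emulate the removed R2 unaligned-access pairs one byte at a time.
	 * Illustrative big-endian example: for "lwl $t0, 1($s0)" with $s0
	 * word-aligned, the loop copies the bytes at $s0+1..$s0+3 into bits
	 * 31..8 of $t0 and stops once the incremented address becomes
	 * word-aligned; a matching "lwr $t0, 4($s0)" then fills bits 7..0.
	 */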
case lwl_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
"1:" LB "%1, 0(%2)\n"
INS "%0, %1, 24, 8\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
"2:" LB "%1, 0(%2)\n"
INS "%0, %1, 16, 8\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
"3:" LB "%1, 0(%2)\n"
INS "%0, %1, 8, 8\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
"4:" LB "%1, 0(%2)\n"
INS "%0, %1, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
"1:" LB "%1, 0(%2)\n"
INS "%0, %1, 24, 8\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
"2:" LB "%1, 0(%2)\n"
INS "%0, %1, 16, 8\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
"3:" LB "%1, 0(%2)\n"
INS "%0, %1, 8, 8\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
"4:" LB "%1, 0(%2)\n"
INS "%0, %1, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9: sll %0, %0, 0\n"
"10:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 10b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV));
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = rt;
MIPS_R2_STATS(loads);
break;
case lwr_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
"1:" LB "%1, 0(%2)\n"
INS "%0, %1, 0, 8\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
"2:" LB "%1, 0(%2)\n"
INS "%0, %1, 8, 8\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
"3:" LB "%1, 0(%2)\n"
INS "%0, %1, 16, 8\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
"4:" LB "%1, 0(%2)\n"
INS "%0, %1, 24, 8\n"
" sll %0, %0, 0\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
"1:" LB "%1, 0(%2)\n"
INS "%0, %1, 0, 8\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
"2:" LB "%1, 0(%2)\n"
INS "%0, %1, 8, 8\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
"3:" LB "%1, 0(%2)\n"
INS "%0, %1, 16, 8\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
"4:" LB "%1, 0(%2)\n"
INS "%0, %1, 24, 8\n"
" sll %0, %0, 0\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9:\n"
"10:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 10b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV));
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = rt;
MIPS_R2_STATS(loads);
break;
case swl_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EXT "%1, %0, 24, 8\n"
"1:" SB "%1, 0(%2)\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
EXT "%1, %0, 16, 8\n"
"2:" SB "%1, 0(%2)\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
EXT "%1, %0, 8, 8\n"
"3:" SB "%1, 0(%2)\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
EXT "%1, %0, 0, 8\n"
"4:" SB "%1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
EXT "%1, %0, 24, 8\n"
"1:" SB "%1, 0(%2)\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
EXT "%1, %0, 16, 8\n"
"2:" SB "%1, 0(%2)\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
EXT "%1, %0, 8, 8\n"
"3:" SB "%1, 0(%2)\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
EXT "%1, %0, 0, 8\n"
"4:" SB "%1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV)
: "memory");
MIPS_R2_STATS(stores);
break;
case swr_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EXT "%1, %0, 0, 8\n"
"1:" SB "%1, 0(%2)\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
EXT "%1, %0, 8, 8\n"
"2:" SB "%1, 0(%2)\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
EXT "%1, %0, 16, 8\n"
"3:" SB "%1, 0(%2)\n"
ADDIU "%2, %2, 1\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
EXT "%1, %0, 24, 8\n"
"4:" SB "%1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
EXT "%1, %0, 0, 8\n"
"1:" SB "%1, 0(%2)\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
EXT "%1, %0, 8, 8\n"
"2:" SB "%1, 0(%2)\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
EXT "%1, %0, 16, 8\n"
"3:" SB "%1, 0(%2)\n"
" andi %1, %2, 0x3\n"
" beq $0, %1, 9f\n"
ADDIU "%2, %2, -1\n"
EXT "%1, %0, 24, 8\n"
"4:" SB "%1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV)
: "memory");
MIPS_R2_STATS(stores);
break;
case ldl_op:
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
"1: lb %1, 0(%2)\n"
" dinsu %0, %1, 56, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"2: lb %1, 0(%2)\n"
" dinsu %0, %1, 48, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"3: lb %1, 0(%2)\n"
" dinsu %0, %1, 40, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"4: lb %1, 0(%2)\n"
" dinsu %0, %1, 32, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"5: lb %1, 0(%2)\n"
" dins %0, %1, 24, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"6: lb %1, 0(%2)\n"
" dins %0, %1, 16, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"7: lb %1, 0(%2)\n"
" dins %0, %1, 8, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"0: lb %1, 0(%2)\n"
" dins %0, %1, 0, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
"1: lb %1, 0(%2)\n"
" dinsu %0, %1, 56, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"2: lb %1, 0(%2)\n"
" dinsu %0, %1, 48, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"3: lb %1, 0(%2)\n"
" dinsu %0, %1, 40, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"4: lb %1, 0(%2)\n"
" dinsu %0, %1, 32, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"5: lb %1, 0(%2)\n"
" dins %0, %1, 24, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"6: lb %1, 0(%2)\n"
" dins %0, %1, 16, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"7: lb %1, 0(%2)\n"
" dins %0, %1, 8, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"0: lb %1, 0(%2)\n"
" dins %0, %1, 0, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
STR(PTR_WD) " 5b,8b\n"
STR(PTR_WD) " 6b,8b\n"
STR(PTR_WD) " 7b,8b\n"
STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV));
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = rt;
MIPS_R2_STATS(loads);
break;
case ldr_op:
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
"1: lb %1, 0(%2)\n"
" dins %0, %1, 0, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"2: lb %1, 0(%2)\n"
" dins %0, %1, 8, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"3: lb %1, 0(%2)\n"
" dins %0, %1, 16, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"4: lb %1, 0(%2)\n"
" dins %0, %1, 24, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"5: lb %1, 0(%2)\n"
" dinsu %0, %1, 32, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"6: lb %1, 0(%2)\n"
" dinsu %0, %1, 40, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"7: lb %1, 0(%2)\n"
" dinsu %0, %1, 48, 8\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
"0: lb %1, 0(%2)\n"
" dinsu %0, %1, 56, 8\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
"1: lb %1, 0(%2)\n"
" dins %0, %1, 0, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"2: lb %1, 0(%2)\n"
" dins %0, %1, 8, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"3: lb %1, 0(%2)\n"
" dins %0, %1, 16, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"4: lb %1, 0(%2)\n"
" dins %0, %1, 24, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"5: lb %1, 0(%2)\n"
" dinsu %0, %1, 32, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"6: lb %1, 0(%2)\n"
" dinsu %0, %1, 40, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"7: lb %1, 0(%2)\n"
" dinsu %0, %1, 48, 8\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
"0: lb %1, 0(%2)\n"
" dinsu %0, %1, 56, 8\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
STR(PTR_WD) " 5b,8b\n"
STR(PTR_WD) " 6b,8b\n"
STR(PTR_WD) " 7b,8b\n"
STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV));
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = rt;
MIPS_R2_STATS(loads);
break;
case sdl_op:
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
" dextu %1, %0, 56, 8\n"
"1: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dextu %1, %0, 48, 8\n"
"2: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dextu %1, %0, 40, 8\n"
"3: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dextu %1, %0, 32, 8\n"
"4: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dext %1, %0, 24, 8\n"
"5: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dext %1, %0, 16, 8\n"
"6: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dext %1, %0, 8, 8\n"
"7: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dext %1, %0, 0, 8\n"
"0: sb %1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
" dextu %1, %0, 56, 8\n"
"1: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dextu %1, %0, 48, 8\n"
"2: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dextu %1, %0, 40, 8\n"
"3: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dextu %1, %0, 32, 8\n"
"4: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dext %1, %0, 24, 8\n"
"5: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dext %1, %0, 16, 8\n"
"6: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dext %1, %0, 8, 8\n"
"7: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dext %1, %0, 0, 8\n"
"0: sb %1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
STR(PTR_WD) " 5b,8b\n"
STR(PTR_WD) " 6b,8b\n"
STR(PTR_WD) " 7b,8b\n"
STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV)
: "memory");
MIPS_R2_STATS(stores);
break;
case sdr_op:
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok((void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
}
__asm__ __volatile__(
" .set push\n"
" .set reorder\n"
#ifdef CONFIG_CPU_LITTLE_ENDIAN
" dext %1, %0, 0, 8\n"
"1: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dext %1, %0, 8, 8\n"
"2: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dext %1, %0, 16, 8\n"
"3: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dext %1, %0, 24, 8\n"
"4: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dextu %1, %0, 32, 8\n"
"5: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dextu %1, %0, 40, 8\n"
"6: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dextu %1, %0, 48, 8\n"
"7: sb %1, 0(%2)\n"
" daddiu %2, %2, 1\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" dextu %1, %0, 56, 8\n"
"0: sb %1, 0(%2)\n"
#else /* !CONFIG_CPU_LITTLE_ENDIAN */
" dext %1, %0, 0, 8\n"
"1: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dext %1, %0, 8, 8\n"
"2: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dext %1, %0, 16, 8\n"
"3: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dext %1, %0, 24, 8\n"
"4: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dextu %1, %0, 32, 8\n"
"5: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dextu %1, %0, 40, 8\n"
"6: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dextu %1, %0, 48, 8\n"
"7: sb %1, 0(%2)\n"
" andi %1, %2, 0x7\n"
" beq $0, %1, 9f\n"
" daddiu %2, %2, -1\n"
" dextu %1, %0, 56, 8\n"
"0: sb %1, 0(%2)\n"
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
"9:\n"
" .insn\n"
" .section .fixup,\"ax\"\n"
"8: li %3,%4\n"
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,8b\n"
STR(PTR_WD) " 2b,8b\n"
STR(PTR_WD) " 3b,8b\n"
STR(PTR_WD) " 4b,8b\n"
STR(PTR_WD) " 5b,8b\n"
STR(PTR_WD) " 6b,8b\n"
STR(PTR_WD) " 7b,8b\n"
STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
"+&r"(vaddr), "+&r"(err)
: "i"(SIGSEGV)
: "memory");
MIPS_R2_STATS(stores);
break;
case ll_op:
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (vaddr & 0x3) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!access_ok((void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!cpu_has_rw_llb) {
/*
 * An LL/SC block can't be safely emulated unless
 * Config5/LLB is available. So it's probably time to
* kill our process before things get any worse. This is
* because Config5/LLB allows us to use ERETNC so that
* the LLAddr/LLB bit is not cleared when we return from
* an exception. MIPS R2 LL/SC instructions trap with an
* RI exception so once we emulate them here, we return
* back to userland with ERETNC. That preserves the
* LLAddr/LLB so the subsequent SC instruction will
* succeed preserving the atomic semantics of the LL/SC
* block. Without that, there is no safe way to emulate
* an LL/SC block in MIPSR2 userland.
*/
pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
err = SIGKILL;
break;
}
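/*
 * For context, the userland sequence whose atomicity this preserves
 * typically looks like (illustrative only):
 *
 *	1:	ll	t0, 0(a0)
 *		addu	t0, t0, a1
 *		sc	t0, 0(a0)
 *		beqz	t0, 1b		# SC failed, retry the LL
 */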
__asm__ __volatile__(
"1:\n"
"ll %0, 0(%2)\n"
"2:\n"
".insn\n"
".section .fixup,\"ax\"\n"
"3:\n"
"li %1, %3\n"
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)
: "memory");
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = res;
MIPS_R2_STATS(llsc);
break;
case sc_op:
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (vaddr & 0x3) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!access_ok((void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!cpu_has_rw_llb) {
/*
 * An LL/SC block can't be safely emulated unless
 * Config5/LLB is available. So it's probably time to
* kill our process before things get any worse. This is
* because Config5/LLB allows us to use ERETNC so that
* the LLAddr/LLB bit is not cleared when we return from
* an exception. MIPS R2 LL/SC instructions trap with an
* RI exception so once we emulate them here, we return
* back to userland with ERETNC. That preserves the
* LLAddr/LLB so the subsequent SC instruction will
* succeed preserving the atomic semantics of the LL/SC
* block. Without that, there is no safe way to emulate
* an LL/SC block in MIPSR2 userland.
*/
pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
err = SIGKILL;
break;
}
res = regs->regs[MIPSInst_RT(inst)];
__asm__ __volatile__(
"1:\n"
"sc %0, 0(%2)\n"
"2:\n"
".insn\n"
".section .fixup,\"ax\"\n"
"3:\n"
"li %1, %3\n"
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV));
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = res;
MIPS_R2_STATS(llsc);
break;
case lld_op:
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (vaddr & 0x7) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!access_ok((void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!cpu_has_rw_llb) {
/*
 * An LL/SC block can't be safely emulated unless
 * Config5/LLB is available. So it's probably time to
* kill our process before things get any worse. This is
* because Config5/LLB allows us to use ERETNC so that
* the LLAddr/LLB bit is not cleared when we return from
* an exception. MIPS R2 LL/SC instructions trap with an
* RI exception so once we emulate them here, we return
* back to userland with ERETNC. That preserves the
* LLAddr/LLB so the subsequent SC instruction will
* succeed preserving the atomic semantics of the LL/SC
* block. Without that, there is no safe way to emulate
* an LL/SC block in MIPSR2 userland.
*/
pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
err = SIGKILL;
break;
}
__asm__ __volatile__(
"1:\n"
"lld %0, 0(%2)\n"
"2:\n"
".insn\n"
".section .fixup,\"ax\"\n"
"3:\n"
"li %1, %3\n"
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)
: "memory");
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = res;
MIPS_R2_STATS(llsc);
break;
case scd_op:
if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (vaddr & 0x7) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!access_ok((void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
}
if (!cpu_has_rw_llb) {
/*
 * An LL/SC block can't be safely emulated unless
 * Config5/LLB is available. So it's probably time to
* kill our process before things get any worse. This is
* because Config5/LLB allows us to use ERETNC so that
* the LLAddr/LLB bit is not cleared when we return from
* an exception. MIPS R2 LL/SC instructions trap with an
* RI exception so once we emulate them here, we return
* back to userland with ERETNC. That preserves the
* LLAddr/LLB so the subsequent SC instruction will
* succeed preserving the atomic semantics of the LL/SC
* block. Without that, there is no safe way to emulate
* an LL/SC block in MIPSR2 userland.
*/
pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
err = SIGKILL;
break;
}
res = regs->regs[MIPSInst_RT(inst)];
__asm__ __volatile__(
"1:\n"
"scd %0, 0(%2)\n"
"2:\n"
".insn\n"
".section .fixup,\"ax\"\n"
"3:\n"
"li %1, %3\n"
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV));
if (MIPSInst_RT(inst) && !err)
regs->regs[MIPSInst_RT(inst)] = res;
MIPS_R2_STATS(llsc);
break;
case pref_op:
/* skip it */
break;
default:
err = SIGILL;
}
/*
* Let's not return to userland just yet. It's costly and
* it's likely we have more R2 instructions to emulate
*/
if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
regs->cp0_cause &= ~CAUSEF_BD;
err = get_user(inst, (u32 __user *)regs->cp0_epc);
if (!err)
goto repeat;
if (err < 0)
err = SIGSEGV;
}
if (err && (err != SIGEMT)) {
regs->regs[31] = r31;
regs->cp0_epc = epc;
}
/* Likely a MIPS R6 compatible instruction */
if (pass && (err == SIGILL))
err = 0;
return err;
}
#ifdef CONFIG_DEBUG_FS
static int mipsr2_emul_show(struct seq_file *s, void *unused)
{
seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
seq_printf(s, "movs\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.movs),
(unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
seq_printf(s, "hilo\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.hilo),
(unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
seq_printf(s, "muls\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.muls),
(unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
seq_printf(s, "divs\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.divs),
(unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
seq_printf(s, "dsps\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.dsps),
(unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
seq_printf(s, "bops\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.bops),
(unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
seq_printf(s, "traps\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.traps),
(unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
seq_printf(s, "fpus\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.fpus),
(unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
seq_printf(s, "loads\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.loads),
(unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
seq_printf(s, "stores\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.stores),
(unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
seq_printf(s, "llsc\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.llsc),
(unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
seq_printf(s, "dsemul\t\t%ld\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
(unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
seq_printf(s, "jr\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
seq_printf(s, "bltzl\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
seq_printf(s, "bgezl\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
seq_printf(s, "bltzll\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
seq_printf(s, "bgezll\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
seq_printf(s, "bltzal\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
seq_printf(s, "bgezal\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
seq_printf(s, "beql\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.beql));
seq_printf(s, "bnel\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
seq_printf(s, "blezl\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
seq_printf(s, "bgtzl\t\t%ld\n",
(unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
return 0;
}
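/*
 * The r2_emul_stats file registered below renders roughly as follows
 * (values are illustrative):
 *
 *	Instruction	Total	BDslot
 *	------------------------------
 *	movs		12	0
 *	hilo		4	1
 *	...
 */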
static int mipsr2_clear_show(struct seq_file *s, void *unused)
{
mipsr2_emul_show(s, unused);
__this_cpu_write((mipsr2emustats).movs, 0);
__this_cpu_write((mipsr2bdemustats).movs, 0);
__this_cpu_write((mipsr2emustats).hilo, 0);
__this_cpu_write((mipsr2bdemustats).hilo, 0);
__this_cpu_write((mipsr2emustats).muls, 0);
__this_cpu_write((mipsr2bdemustats).muls, 0);
__this_cpu_write((mipsr2emustats).divs, 0);
__this_cpu_write((mipsr2bdemustats).divs, 0);
__this_cpu_write((mipsr2emustats).dsps, 0);
__this_cpu_write((mipsr2bdemustats).dsps, 0);
__this_cpu_write((mipsr2emustats).bops, 0);
__this_cpu_write((mipsr2bdemustats).bops, 0);
__this_cpu_write((mipsr2emustats).traps, 0);
__this_cpu_write((mipsr2bdemustats).traps, 0);
__this_cpu_write((mipsr2emustats).fpus, 0);
__this_cpu_write((mipsr2bdemustats).fpus, 0);
__this_cpu_write((mipsr2emustats).loads, 0);
__this_cpu_write((mipsr2bdemustats).loads, 0);
__this_cpu_write((mipsr2emustats).stores, 0);
__this_cpu_write((mipsr2bdemustats).stores, 0);
__this_cpu_write((mipsr2emustats).llsc, 0);
__this_cpu_write((mipsr2bdemustats).llsc, 0);
__this_cpu_write((mipsr2emustats).dsemul, 0);
__this_cpu_write((mipsr2bdemustats).dsemul, 0);
__this_cpu_write((mipsr2bremustats).jrs, 0);
__this_cpu_write((mipsr2bremustats).bltzl, 0);
__this_cpu_write((mipsr2bremustats).bgezl, 0);
__this_cpu_write((mipsr2bremustats).bltzll, 0);
__this_cpu_write((mipsr2bremustats).bgezll, 0);
__this_cpu_write((mipsr2bremustats).bltzall, 0);
__this_cpu_write((mipsr2bremustats).bgezall, 0);
__this_cpu_write((mipsr2bremustats).bltzal, 0);
__this_cpu_write((mipsr2bremustats).bgezal, 0);
__this_cpu_write((mipsr2bremustats).beql, 0);
__this_cpu_write((mipsr2bremustats).bnel, 0);
__this_cpu_write((mipsr2bremustats).blezl, 0);
__this_cpu_write((mipsr2bremustats).bgtzl, 0);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mipsr2_emul);
DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
static int __init mipsr2_init_debugfs(void)
{
debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL,
&mipsr2_emul_fops);
debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir,
NULL, &mipsr2_clear_fops);
return 0;
}
device_initcall(mipsr2_init_debugfs);
#endif /* CONFIG_DEBUG_FS */
| linux-master | arch/mips/kernel/mips-r2-to-r6-emul.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
void *irq_stack[NR_CPUS];
/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
*/
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ # %d\n", irq);
}
atomic_t irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
asmlinkage void spurious_interrupt(void)
{
atomic_inc(&irq_err_count);
}
void __init init_IRQ(void)
{
int i;
unsigned int order = get_order(IRQ_STACK_SIZE);
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
if (cpu_has_veic)
clear_c0_status(ST0_IM);
arch_init_irq();
for_each_possible_cpu(i) {
void *s = (void *)__get_free_pages(GFP_KERNEL, order);
irq_stack[i] = s;
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
}
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
unsigned long sp;
__asm__ __volatile__("move %0, $sp" : "=r" (sp));
sp &= THREAD_MASK;
/*
* Check for stack overflow: is there less than STACK_WARN free?
* STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
*/
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
printk("do_IRQ: stack overflow: %ld\n",
sp - sizeof(struct thread_info));
dump_stack();
}
}
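/*
 * Worked example (assuming 16 KiB kernel stacks): STACK_WARN is then
 * 2 KiB, so the warning fires once the stack pointer drops to within
 * 2 KiB of the thread_info at the base of the stack.
 */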
#else
static inline void check_stack_overflow(void) {}
#endif
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
check_stack_overflow();
generic_handle_irq(irq);
irq_exit();
}
#ifdef CONFIG_IRQ_DOMAIN
void __irq_entry do_domain_IRQ(struct irq_domain *domain, unsigned int hwirq)
{
irq_enter();
check_stack_overflow();
generic_handle_domain_irq(domain, hwirq);
irq_exit();
}
#endif
| linux-master | arch/mips/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DS1287 clockevent driver
*
* Copyright (C) 2008 Yoichi Yuasa <[email protected]>
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <asm/time.h>
int ds1287_timer_state(void)
{
return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
}
int ds1287_set_base_clock(unsigned int hz)
{
u8 rate;
switch (hz) {
case 128:
rate = 0x9;
break;
case 256:
rate = 0x8;
break;
case 1024:
rate = 0x6;
break;
default:
return -EINVAL;
}
CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
return 0;
}
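/*
 * Usage sketch (hypothetical caller): pick a 128 Hz periodic rate
 * before enabling periodic mode:
 *
 *	if (ds1287_set_base_clock(128))
 *		pr_err("ds1287: unsupported base clock\n");
 */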
static int ds1287_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
return -EINVAL;
}
static int ds1287_shutdown(struct clock_event_device *evt)
{
u8 val;
spin_lock(&rtc_lock);
val = CMOS_READ(RTC_REG_B);
val &= ~RTC_PIE;
CMOS_WRITE(val, RTC_REG_B);
spin_unlock(&rtc_lock);
return 0;
}
static int ds1287_set_periodic(struct clock_event_device *evt)
{
u8 val;
spin_lock(&rtc_lock);
val = CMOS_READ(RTC_REG_B);
val |= RTC_PIE;
CMOS_WRITE(val, RTC_REG_B);
spin_unlock(&rtc_lock);
return 0;
}
static void ds1287_event_handler(struct clock_event_device *dev)
{
}
static struct clock_event_device ds1287_clockevent = {
.name = "ds1287",
.features = CLOCK_EVT_FEAT_PERIODIC,
.set_next_event = ds1287_set_next_event,
.set_state_shutdown = ds1287_shutdown,
.set_state_periodic = ds1287_set_periodic,
.tick_resume = ds1287_shutdown,
.event_handler = ds1287_event_handler,
};
static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = &ds1287_clockevent;
/* Ack the RTC interrupt. */
CMOS_READ(RTC_REG_C);
cd->event_handler(cd);
return IRQ_HANDLED;
}
int __init ds1287_clockevent_init(int irq)
{
unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
struct clock_event_device *cd;
cd = &ds1287_clockevent;
cd->rating = 100;
cd->irq = irq;
clockevent_set_clock(cd, 32768);
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->max_delta_ticks = 0x7fffffff;
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->min_delta_ticks = 0x300;
cd->cpumask = cpumask_of(0);
clockevents_register_device(&ds1287_clockevent);
return request_irq(irq, ds1287_interrupt, flags, "ds1287", NULL);
}
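/*
 * Typical board wiring (a sketch; RTC_IRQ is a placeholder for the
 * platform's RTC interrupt line):
 *
 *	void __init plat_time_init(void)
 *	{
 *		if (ds1287_clockevent_init(RTC_IRQ))
 *			pr_err("ds1287: clockevent setup failed\n");
 *	}
 */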
| linux-master | arch/mips/kernel/cevt-ds1287.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
/* This keeps track of which CPU is the crashing one. */
static int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
static void crash_shutdown_secondary(void *passed_regs)
{
struct pt_regs *regs = passed_regs;
int cpu = smp_processor_id();
/*
* If we are passed registers, use those. Otherwise get the
* regs from the last interrupt, which should be correct, as
* we are in an interrupt. But if the regs are not there,
* pull them from the top of the stack. They are probably
* wrong, but we need something to keep from crashing again.
*/
if (!regs)
regs = get_irq_regs();
if (!regs)
regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
cpumask_set_cpu(cpu, &cpus_in_crash);
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
/* NOTREACHED */
}
static void crash_kexec_prepare_cpus(void)
{
static int cpus_stopped;
unsigned int msecs;
unsigned int ncpus;
if (cpus_stopped)
return;
ncpus = num_online_cpus() - 1;	/* Excluding the panic cpu */
smp_call_function(crash_shutdown_secondary, NULL, 0);
smp_wmb();
/*
 * The crash CPU sends an IPI and waits up to 10 seconds for
 * the other CPUs to respond.
*/
pr_emerg("Sending IPI to other cpus...\n");
msecs = 10000;
while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
cpu_relax();
mdelay(1);
}
cpus_stopped = 1;
}
/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
if (_crash_smp_send_stop)
_crash_smp_send_stop();
crash_kexec_prepare_cpus();
}
#else /* !defined(CONFIG_SMP) */
static void crash_kexec_prepare_cpus(void) {}
#endif /* !defined(CONFIG_SMP) */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
local_irq_disable();
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus();
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
}
| linux-master | arch/mips/kernel/crash.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Ross Biro
* Copyright (C) Linus Torvalds
* Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
* Copyright (C) 1996 David S. Miller
* Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 1999 MIPS Technologies, Inc.
* Copyright (C) 2000 Ulf Carlsson
*
* At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
* binaries.
*/
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* Don't load the watchpoint registers for the ex-child. */
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/*
* Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
struct pt_regs *regs;
int i;
if (!access_ok(data, 38 * 8))
return -EIO;
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
__put_user((long)regs->lo, (__s64 __user *)&data->lo);
__put_user((long)regs->hi, (__s64 __user *)&data->hi);
__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
return 0;
}
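/*
 * Userland view (a sketch, error handling omitted): a debugger reads
 * the 38 doubleword slots laid out above in one call:
 *
 *	uint64_t gregs[38];
 *	ptrace(PTRACE_GETREGS, pid, NULL, gregs);
 *	// gregs[0..31] are the GPRs, followed by lo, hi, cp0_epc,
 *	// cp0_badvaddr, cp0_status and cp0_cause
 */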
/*
* Write a general register set. As for PTRACE_GETREGS, we always use
* the 64-bit format. On a 32-bit kernel only the lower order half
* (according to endianness) will be used.
*/
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
struct pt_regs *regs;
int i;
if (!access_ok(data, 38 * 8))
return -EIO;
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
__get_user(regs->lo, (__s64 __user *)&data->lo);
__get_user(regs->hi, (__s64 __user *)&data->hi);
__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
/* badvaddr, status, and cause may not be written. */
/* System call number may have been changed */
mips_syscall_update_nr(child, regs);
return 0;
}
int ptrace_get_watch_regs(struct task_struct *child,
struct pt_watch_regs __user *addr)
{
enum pt_watch_style style;
int i;
if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
return -EIO;
if (!access_ok(addr, sizeof(struct pt_watch_regs)))
return -EIO;
#ifdef CONFIG_32BIT
style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif
__put_user(style, &addr->style);
__put_user(boot_cpu_data.watch_reg_use_cnt,
&addr->WATCH_STYLE.num_valid);
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
__put_user(child->thread.watch.mips3264.watchlo[i],
&addr->WATCH_STYLE.watchlo[i]);
__put_user(child->thread.watch.mips3264.watchhi[i] &
(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
&addr->WATCH_STYLE.watchhi[i]);
__put_user(boot_cpu_data.watch_reg_masks[i],
&addr->WATCH_STYLE.watch_masks[i]);
}
for (; i < 8; i++) {
__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
}
return 0;
}
int ptrace_set_watch_regs(struct task_struct *child,
struct pt_watch_regs __user *addr)
{
int i;
int watch_active = 0;
unsigned long lt[NUM_WATCH_REGS];
u16 ht[NUM_WATCH_REGS];
if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
return -EIO;
if (!access_ok(addr, sizeof(struct pt_watch_regs)))
return -EIO;
/* Check the values. */
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
if (lt[i] & __UA_LIMIT)
return -EINVAL;
#else
if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
if (lt[i] & 0xffffffff80000000UL)
return -EINVAL;
} else {
if (lt[i] & __UA_LIMIT)
return -EINVAL;
}
#endif
__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
if (ht[i] & ~MIPS_WATCHHI_MASK)
return -EINVAL;
}
/* Install them. */
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
if (lt[i] & MIPS_WATCHLO_IRW)
watch_active = 1;
child->thread.watch.mips3264.watchlo[i] = lt[i];
/* Set the G bit. */
child->thread.watch.mips3264.watchhi[i] = ht[i];
}
if (watch_active)
set_tsk_thread_flag(child, TIF_LOAD_WATCH);
else
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
return 0;
}
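/*
 * The address screening above confines watchpoints to the user
 * segment: e.g. on a 64-bit kernel a task running with 32-bit
 * addressing may only watch addresses below 0x80000000, matching
 * its compat address space.
 */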
/* regset get/set implementations */
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
u32 uregs[ELF_NGREG] = {};
mips_dump_regs32(uregs, regs);
return membuf_write(&to, uregs, sizeof(uregs));
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
u32 uregs[ELF_NGREG];
unsigned start, num_regs, i;
int err;
start = pos / sizeof(u32);
num_regs = count / sizeof(u32);
if (start + num_regs > ELF_NGREG)
return -EIO;
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
sizeof(uregs));
if (err)
return err;
for (i = start; i < num_regs; i++) {
/*
* Cast all values to signed here so that if this is a 64-bit
* kernel, the supplied 32-bit values will be sign extended.
*/
switch (i) {
case MIPS32_EF_R1 ... MIPS32_EF_R25:
/* k0/k1 are ignored. */
case MIPS32_EF_R28 ... MIPS32_EF_R31:
regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
break;
case MIPS32_EF_LO:
regs->lo = (s32)uregs[i];
break;
case MIPS32_EF_HI:
regs->hi = (s32)uregs[i];
break;
case MIPS32_EF_CP0_EPC:
regs->cp0_epc = (s32)uregs[i];
break;
}
}
/* System call number may have been changed */
mips_syscall_update_nr(target, regs);
return 0;
}
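/*
 * Worked example of the sign extension above: a supplied 32-bit value
 * of 0x80000000 lands in the 64-bit register slot as
 * 0xffffffff80000000, exactly what native 32-bit execution would have
 * left there.
 */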
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
static int gpr64_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
u64 uregs[ELF_NGREG] = {};
mips_dump_regs64(uregs, regs);
return membuf_write(&to, uregs, sizeof(uregs));
}
static int gpr64_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
u64 uregs[ELF_NGREG];
unsigned start, num_regs, i;
int err;
start = pos / sizeof(u64);
num_regs = count / sizeof(u64);
if (start + num_regs > ELF_NGREG)
return -EIO;
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
sizeof(uregs));
if (err)
return err;
for (i = start; i < num_regs; i++) {
switch (i) {
case MIPS64_EF_R1 ... MIPS64_EF_R25:
/* k0/k1 are ignored. */
case MIPS64_EF_R28 ... MIPS64_EF_R31:
regs->regs[i - MIPS64_EF_R0] = uregs[i];
break;
case MIPS64_EF_LO:
regs->lo = uregs[i];
break;
case MIPS64_EF_HI:
regs->hi = uregs[i];
break;
case MIPS64_EF_CP0_EPC:
regs->cp0_epc = uregs[i];
break;
}
}
/* System call number may have been changed */
mips_syscall_update_nr(target, regs);
return 0;
}
#endif /* CONFIG_64BIT */
#ifdef CONFIG_MIPS_FP_SUPPORT
/*
* Poke at FCSR according to its mask. Set the Cause bits even
* if a corresponding Enable bit is set. This will be noticed at
* the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}
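/*
 * In the blend above, fpu_msk31 marks the FCSR bits that are
 * read-only on this FPU: those retain their current contents while
 * every writable bit is taken from the ptrace caller's value.
 */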
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
if (!access_ok(data, 33 * 8))
return -EIO;
if (tsk_used_math(child)) {
union fpureg *fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
__put_user(get_fpr64(&fregs[i], 0),
i + (__u64 __user *)data);
} else {
for (i = 0; i < 32; i++)
__put_user((__u64) -1, i + (__u64 __user *) data);
}
__put_user(child->thread.fpu.fcr31, data + 64);
__put_user(boot_cpu_data.fpu_id, data + 65);
return 0;
}
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
union fpureg *fregs;
u64 fpr_val;
u32 value;
int i;
if (!access_ok(data, 33 * 8))
return -EIO;
init_fp_ctx(child);
fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++) {
__get_user(fpr_val, i + (__u64 __user *)data);
set_fpr64(&fregs[i], 0, fpr_val);
}
__get_user(value, data + 64);
ptrace_setfcr31(child, value);
/* FIR may not be written. */
return 0;
}
/*
* Copy the floating-point context to the supplied NT_PRFPREG buffer,
* !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
* correspond 1:1 to buffer slots. Only general registers are copied.
*/
static void fpr_get_fpa(struct task_struct *target,
struct membuf *to)
{
membuf_write(to, &target->thread.fpu,
NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
/*
* Copy the floating-point context to the supplied NT_PRFPREG buffer,
* CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
* general register slots are copied to buffer slots. Only general
* registers are copied.
*/
static void fpr_get_msa(struct task_struct *target, struct membuf *to)
{
unsigned int i;
BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
for (i = 0; i < NUM_FPU_REGS; i++)
membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
}
/*
* Copy the floating-point context to the supplied NT_PRFPREG buffer.
* Choose the appropriate helper for general registers, and then copy
* the FCSR and FIR registers separately.
*/
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
fpr_get_fpa(target, &to);
else
fpr_get_msa(target, &to);
membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
return 0;
}
/*
* Copy the supplied NT_PRFPREG buffer to the floating-point context,
* !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
* context's general register slots. Only general registers are copied.
*/
static int fpr_set_fpa(struct task_struct *target,
unsigned int *pos, unsigned int *count,
const void **kbuf, const void __user **ubuf)
{
return user_regset_copyin(pos, count, kbuf, ubuf,
&target->thread.fpu,
0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
/*
* Copy the supplied NT_PRFPREG buffer to the floating-point context,
* CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
* bits only of FP context's general register slots. Only general
* registers are copied.
*/
static int fpr_set_msa(struct task_struct *target,
unsigned int *pos, unsigned int *count,
const void **kbuf, const void __user **ubuf)
{
unsigned int i;
u64 fpr_val;
int err;
BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
err = user_regset_copyin(pos, count, kbuf, ubuf,
&fpr_val, i * sizeof(elf_fpreg_t),
(i + 1) * sizeof(elf_fpreg_t));
if (err)
return err;
set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
}
return 0;
}
/*
* Copy the supplied NT_PRFPREG buffer to the floating-point context.
* Choose the appropriate helper for general registers, and then copy
* the FCSR register separately. Ignore the incoming FIR register
* contents though, as the register is read-only.
*
* We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
* which is supposed to have been guaranteed by the kernel before
* calling us, e.g. in `ptrace_regset'. We enforce that requirement,
* so that we can safely avoid preinitializing temporaries for
* partial register writes.
*/
static int fpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
const int fir_pos = fcr31_pos + sizeof(u32);
u32 fcr31;
int err;
BUG_ON(count % sizeof(elf_fpreg_t));
if (pos + count > sizeof(elf_fpregset_t))
return -EIO;
init_fp_ctx(target);
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
else
err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
if (err)
return err;
if (count > 0) {
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fcr31,
fcr31_pos, fcr31_pos + sizeof(u32));
if (err)
return err;
ptrace_setfcr31(target, fcr31);
}
if (count > 0) {
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
fir_pos, fir_pos + sizeof(u32));
return 0;
}
return err;
}
/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
static int fp_mode_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}
/*
* Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
*
* We optimize for the case where `count % sizeof(int) == 0', which
* is supposed to have been guaranteed by the kernel before calling
* us, e.g. in `ptrace_regset'. We enforce that requirement, so
* that we can safely avoid preinitializing temporaries for partial
* mode writes.
*/
static int fp_mode_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int fp_mode;
int err;
BUG_ON(count % sizeof(int));
if (pos + count > sizeof(fp_mode))
return -EIO;
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
sizeof(fp_mode));
if (err)
return err;
if (count > 0)
err = mips_set_process_fp_mode(target, fp_mode);
return err;
}
#endif /* CONFIG_MIPS_FP_SUPPORT */
#ifdef CONFIG_CPU_HAS_MSA
struct msa_control_regs {
unsigned int fir;
unsigned int fcsr;
unsigned int msair;
unsigned int msacsr;
};
static void copy_pad_fprs(struct task_struct *target,
const struct user_regset *regset,
struct membuf *to,
unsigned int live_sz)
{
int i, j;
unsigned long long fill = ~0ull;
unsigned int cp_sz, pad_sz;
cp_sz = min(regset->size, live_sz);
pad_sz = regset->size - cp_sz;
WARN_ON(pad_sz % sizeof(fill));
for (i = 0; i < NUM_FPU_REGS; i++) {
membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
for (j = 0; j < (pad_sz / sizeof(fill)); j++)
membuf_store(to, fill);
}
}
static int msa_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const unsigned int wr_size = NUM_FPU_REGS * regset->size;
const struct msa_control_regs ctrl_regs = {
.fir = boot_cpu_data.fpu_id,
.fcsr = target->thread.fpu.fcr31,
.msair = boot_cpu_data.msa_id,
.msacsr = target->thread.fpu.msacsr,
};
if (!tsk_used_math(target)) {
/* The task hasn't used FP or MSA, fill with 0xff */
copy_pad_fprs(target, regset, &to, 0);
} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
/* Copy scalar FP context, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, 8);
} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
/* Trivially copy the vector registers */
membuf_write(&to, &target->thread.fpu.fpr, wr_size);
} else {
/* Copy as much context as possible, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to,
sizeof(target->thread.fpu.fpr[0]));
}
return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}
static int msa_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const unsigned int wr_size = NUM_FPU_REGS * regset->size;
struct msa_control_regs ctrl_regs;
unsigned int cp_sz;
int i, err, start;
init_fp_ctx(target);
if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
/* Trivially copy the vector registers */
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fpr,
0, wr_size);
} else {
/* Copy as much context as possible */
cp_sz = min_t(unsigned int, regset->size,
sizeof(target->thread.fpu.fpr[0]));
i = start = err = 0;
for (; i < NUM_FPU_REGS; i++, start += regset->size) {
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fpr[i],
start, start + cp_sz);
}
}
if (!err)
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
wr_size, wr_size + sizeof(ctrl_regs));
if (!err) {
target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
}
return err;
}
#endif /* CONFIG_CPU_HAS_MSA */
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
/*
* Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
*/
static int dsp32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
u32 dspregs[NUM_DSP_REGS + 1];
unsigned int i;
BUG_ON(to.left % sizeof(u32));
if (!cpu_has_dsp)
return -EIO;
for (i = 0; i < NUM_DSP_REGS; i++)
dspregs[i] = target->thread.dsp.dspr[i];
dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
return membuf_write(&to, dspregs, sizeof(dspregs));
}
/*
* Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
*/
static int dsp32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned int start, num_regs, i;
u32 dspregs[NUM_DSP_REGS + 1];
int err;
BUG_ON(count % sizeof(u32));
if (!cpu_has_dsp)
return -EIO;
start = pos / sizeof(u32);
num_regs = count / sizeof(u32);
if (start + num_regs > NUM_DSP_REGS + 1)
return -EIO;
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
sizeof(dspregs));
if (err)
return err;
for (i = start; i < num_regs; i++)
switch (i) {
case 0 ... NUM_DSP_REGS - 1:
target->thread.dsp.dspr[i] = (s32)dspregs[i];
break;
case NUM_DSP_REGS:
target->thread.dsp.dspcontrol = (s32)dspregs[i];
break;
}
return 0;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
/*
* Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
*/
static int dsp64_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
u64 dspregs[NUM_DSP_REGS + 1];
unsigned int i;
BUG_ON(to.left % sizeof(u64));
if (!cpu_has_dsp)
return -EIO;
for (i = 0; i < NUM_DSP_REGS; i++)
dspregs[i] = target->thread.dsp.dspr[i];
dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
return membuf_write(&to, dspregs, sizeof(dspregs));
}
/*
* Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
*/
static int dsp64_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned int start, num_regs, i;
u64 dspregs[NUM_DSP_REGS + 1];
int err;
BUG_ON(count % sizeof(u64));
if (!cpu_has_dsp)
return -EIO;
start = pos / sizeof(u64);
num_regs = count / sizeof(u64);
if (start + num_regs > NUM_DSP_REGS + 1)
return -EIO;
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
sizeof(dspregs));
if (err)
return err;
for (i = start; i < num_regs; i++)
switch (i) {
case 0 ... NUM_DSP_REGS - 1:
target->thread.dsp.dspr[i] = dspregs[i];
break;
case NUM_DSP_REGS:
target->thread.dsp.dspcontrol = dspregs[i];
break;
}
return 0;
}
#endif /* CONFIG_64BIT */
/*
* Determine whether the DSP context is present.
*/
static int dsp_active(struct task_struct *target,
const struct user_regset *regset)
{
return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
}
enum mips_regset {
REGSET_GPR,
REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
REGSET_FPR,
REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
REGSET_MSA,
#endif
};
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(reg, r) { \
.name = #reg, \
.offset = offsetof(struct pt_regs, r) \
}
#define REG_OFFSET_END { \
.name = NULL, \
.offset = 0 \
}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0, regs[0]),
REG_OFFSET_NAME(r1, regs[1]),
REG_OFFSET_NAME(r2, regs[2]),
REG_OFFSET_NAME(r3, regs[3]),
REG_OFFSET_NAME(r4, regs[4]),
REG_OFFSET_NAME(r5, regs[5]),
REG_OFFSET_NAME(r6, regs[6]),
REG_OFFSET_NAME(r7, regs[7]),
REG_OFFSET_NAME(r8, regs[8]),
REG_OFFSET_NAME(r9, regs[9]),
REG_OFFSET_NAME(r10, regs[10]),
REG_OFFSET_NAME(r11, regs[11]),
REG_OFFSET_NAME(r12, regs[12]),
REG_OFFSET_NAME(r13, regs[13]),
REG_OFFSET_NAME(r14, regs[14]),
REG_OFFSET_NAME(r15, regs[15]),
REG_OFFSET_NAME(r16, regs[16]),
REG_OFFSET_NAME(r17, regs[17]),
REG_OFFSET_NAME(r18, regs[18]),
REG_OFFSET_NAME(r19, regs[19]),
REG_OFFSET_NAME(r20, regs[20]),
REG_OFFSET_NAME(r21, regs[21]),
REG_OFFSET_NAME(r22, regs[22]),
REG_OFFSET_NAME(r23, regs[23]),
REG_OFFSET_NAME(r24, regs[24]),
REG_OFFSET_NAME(r25, regs[25]),
REG_OFFSET_NAME(r26, regs[26]),
REG_OFFSET_NAME(r27, regs[27]),
REG_OFFSET_NAME(r28, regs[28]),
REG_OFFSET_NAME(r29, regs[29]),
REG_OFFSET_NAME(r30, regs[30]),
REG_OFFSET_NAME(r31, regs[31]),
REG_OFFSET_NAME(c0_status, cp0_status),
REG_OFFSET_NAME(hi, hi),
REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
REG_OFFSET_NAME(acx, acx),
#endif
REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
REG_OFFSET_NAME(c0_cause, cp0_cause),
REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
REG_OFFSET_NAME(mpl0, mpl[0]),
REG_OFFSET_NAME(mpl1, mpl[1]),
REG_OFFSET_NAME(mpl2, mpl[2]),
REG_OFFSET_NAME(mtp0, mtp[0]),
REG_OFFSET_NAME(mtp1, mtp[1]),
REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
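/*
 * Example: regs_query_register_offset("c0_epc") yields
 * offsetof(struct pt_regs, cp0_epc), which fetch helpers such as
 * regs_get_register() can then use to pull the saved EPC out of a
 * pt_regs pointer.
 */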
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
static const struct user_regset mips_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned int),
.align = sizeof(unsigned int),
.regset_get = gpr32_get,
.set = gpr32_set,
},
[REGSET_DSP] = {
.core_note_type = NT_MIPS_DSP,
.n = NUM_DSP_REGS + 1,
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = dsp32_get,
.set = dsp32_set,
.active = dsp_active,
},
#ifdef CONFIG_MIPS_FP_SUPPORT
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
.regset_get = fpr_get,
.set = fpr_set,
},
[REGSET_FP_MODE] = {
.core_note_type = NT_MIPS_FP_MODE,
.n = 1,
.size = sizeof(int),
.align = sizeof(int),
.regset_get = fp_mode_get,
.set = fp_mode_set,
},
#endif
#ifdef CONFIG_CPU_HAS_MSA
[REGSET_MSA] = {
.core_note_type = NT_MIPS_MSA,
.n = NUM_FPU_REGS + 1,
.size = 16,
.align = 16,
.regset_get = msa_get,
.set = msa_set,
},
#endif
};
static const struct user_regset_view user_mips_view = {
.name = "mips",
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = mips_regsets,
.n = ARRAY_SIZE(mips_regsets),
};
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
static const struct user_regset mips64_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.regset_get = gpr64_get,
.set = gpr64_set,
},
[REGSET_DSP] = {
.core_note_type = NT_MIPS_DSP,
.n = NUM_DSP_REGS + 1,
.size = sizeof(u64),
.align = sizeof(u64),
.regset_get = dsp64_get,
.set = dsp64_set,
.active = dsp_active,
},
#ifdef CONFIG_MIPS_FP_SUPPORT
[REGSET_FP_MODE] = {
.core_note_type = NT_MIPS_FP_MODE,
.n = 1,
.size = sizeof(int),
.align = sizeof(int),
.regset_get = fp_mode_get,
.set = fp_mode_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
.regset_get = fpr_get,
.set = fpr_set,
},
#endif
#ifdef CONFIG_CPU_HAS_MSA
[REGSET_MSA] = {
.core_note_type = NT_MIPS_MSA,
.n = NUM_FPU_REGS + 1,
.size = 16,
.align = 16,
.regset_get = msa_get,
.set = msa_set,
},
#endif
};
static const struct user_regset_view user_mips64_view = {
.name = "mips64",
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = mips64_regsets,
.n = ARRAY_SIZE(mips64_regsets),
};
#ifdef CONFIG_MIPS32_N32
static const struct user_regset_view user_mipsn32_view = {
.name = "mipsn32",
.e_flags = EF_MIPS_ABI2,
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = mips64_regsets,
.n = ARRAY_SIZE(mips64_regsets),
};
#endif /* CONFIG_MIPS32_N32 */
#endif /* CONFIG_64BIT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
return &user_mipsn32_view;
#endif
return &user_mips64_view;
#endif
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
void __user *addrp = (void __user *) addr;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
ret = generic_ptrace_peekdata(child, addr, data);
break;
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
unsigned long tmp = 0;
regs = task_pt_regs(child);
ret = 0; /* Default return value. */
switch (addr) {
case 0 ... 31:
tmp = regs->regs[addr];
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs;
if (!tsk_used_math(child)) {
/* FP not yet used */
tmp = -1;
break;
}
fregs = get_fpu_regs(child);
#ifdef CONFIG_32BIT
if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers.
*/
tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1);
break;
}
#endif
tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
}
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
case FPC_EIR:
/* implementation / version register */
tmp = boot_cpu_data.fpu_id;
break;
#endif
case PC:
tmp = regs->cp0_epc;
break;
case CAUSE:
tmp = regs->cp0_cause;
break;
case BADVADDR:
tmp = regs->cp0_badvaddr;
break;
case MMHI:
tmp = regs->hi;
break;
case MMLO:
tmp = regs->lo;
break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
case ACX:
tmp = regs->acx;
break;
#endif
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
dregs = __get_dsp_regs(child);
tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
tmp = child->thread.dsp.dspcontrol;
break;
default:
tmp = 0;
ret = -EIO;
goto out;
}
ret = put_user(tmp, datalp);
break;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: {
struct pt_regs *regs;
ret = 0;
regs = task_pt_regs(child);
switch (addr) {
case 0 ... 31:
regs->regs[addr] = data;
/* System call number may have been changed */
if (addr == 2)
mips_syscall_update_nr(child, regs);
else if (addr == 4 &&
mips_syscall_is_indirect(child, regs))
mips_syscall_update_nr(child, regs);
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
init_fp_ctx(child);
#ifdef CONFIG_32BIT
if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers.
*/
set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1, data);
break;
}
#endif
set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case FPC_CSR:
init_fp_ctx(child);
ptrace_setfcr31(child, data);
break;
#endif
case PC:
regs->cp0_epc = data;
break;
case MMHI:
regs->hi = data;
break;
case MMLO:
regs->lo = data;
break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
case ACX:
regs->acx = data;
break;
#endif
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
dregs = __get_dsp_regs(child);
dregs[addr - DSP_BASE] = data;
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
child->thread.dsp.dspcontrol = data;
break;
default:
/* The rest are not allowed. */
ret = -EIO;
break;
}
break;
}
case PTRACE_GETREGS:
ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child, datavp);
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, datavp);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
case PTRACE_GET_WATCH_REGS:
ret = ptrace_get_watch_regs(child, addrp);
break;
case PTRACE_SET_WATCH_REGS:
ret = ptrace_set_watch_regs(child, addrp);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
out:
return ret;
}
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
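/*
 * Returns the syscall number to dispatch (possibly rewritten by the
 * tracer or by seccomp), or -1 to skip the system call entirely.
 */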
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
user_exit();
current_thread_info()->syscall = syscall;
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (ptrace_report_syscall_entry(regs))
return -1;
syscall = current_thread_info()->syscall;
}
#ifdef CONFIG_SECCOMP
if (unlikely(test_thread_flag(TIF_SECCOMP))) {
int ret, i;
struct seccomp_data sd;
unsigned long args[6];
sd.nr = syscall;
sd.arch = syscall_get_arch(current);
syscall_get_arguments(current, regs, args);
for (i = 0; i < 6; i++)
sd.args[i] = args[i];
sd.instruction_pointer = KSTK_EIP(current);
ret = __secure_computing(&sd);
if (ret == -1)
return ret;
syscall = current_thread_info()->syscall;
}
#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
/*
* Negative syscall numbers are mistaken for rejected syscalls, but
* won't have had the return value set appropriately, so we do so now.
*/
if (syscall < 0)
syscall_set_return_value(current, regs, -ENOSYS, 0);
return syscall;
}
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
/*
* We may come here right after calling schedule_user()
* or do_notify_resume(), in which case we can be in RCU
* user mode.
*/
user_exit();
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, 0);
user_enter();
}
| linux-master | arch/mips/kernel/ptrace.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
* Copyright (C) 1995, 1996 Paul M. Antoine
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/isa-rev.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>
#include <asm/mach-loongson64/cpucfg-emul.h>
#include "access-helper.h"
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_gsexc(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);
void (*board_be_init)(void);
static int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
void mips_set_be_handler(int (*handler)(struct pt_regs *regs, int is_fixup))
{
board_be_handler = handler;
}
EXPORT_SYMBOL_GPL(mips_set_be_handler);
static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
bool user)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
printk("%s\n", loglvl);
#endif
while (!kstack_end(sp)) {
if (__get_addr(&addr, sp++, user)) {
printk("%s (Bad stack address)", loglvl);
break;
}
if (__kernel_text_address(addr))
print_ip_sym(loglvl, addr);
}
printk("%s\n", loglvl);
}
#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
raw_show_trace = 1;
return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (!task)
task = current;
if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
show_raw_backtrace(sp, loglvl, user);
return;
}
printk("%sCall Trace:\n", loglvl);
do {
print_ip_sym(loglvl, pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
pr_cont("\n");
}
/*
* This routine abuses get_user()/put_user() to reference pointers
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
const struct pt_regs *regs, const char *loglvl, bool user)
{
const int field = 2 * sizeof(unsigned long);
unsigned long stackdata;
int i;
unsigned long *sp = (unsigned long *)regs->regs[29];
printk("%sStack :", loglvl);
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0)) {
pr_cont("\n");
printk("%s ", loglvl);
}
if (i > 39) {
pr_cont(" ...");
break;
}
if (__get_addr(&stackdata, sp++, user)) {
pr_cont(" (Bad stack address)");
break;
}
pr_cont(" %0*lx", field, stackdata);
i++;
}
pr_cont("\n");
show_backtrace(task, regs, loglvl, user);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
struct pt_regs regs;
regs.cp0_status = KSU_KERNEL;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
regs.cp0_epc = 0;
} else {
if (task && task != current) {
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
} else {
			prepare_frametrace(&regs);
}
}
	show_stacktrace(task, &regs, loglvl, false);
}
static void show_code(void *pc, bool user)
{
long i;
unsigned short *pc16 = NULL;
printk("Code:");
if ((unsigned long)pc & 1)
pc16 = (u16 *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
if (pc16) {
u16 insn16;
if (__get_inst16(&insn16, pc16 + i, user))
goto bad_address;
pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>'));
} else {
u32 insn32;
if (__get_inst32(&insn32, (u32 *)pc + i, user))
goto bad_address;
pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>'));
}
}
pr_cont("\n");
return;
bad_address:
pr_cont(" (Bad address in epc)\n\n");
}
static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int cause = regs->cp0_cause;
unsigned int exccode;
int i;
show_regs_print_info(KERN_DEFAULT);
/*
* Saved main processor registers
*/
for (i = 0; i < 32; ) {
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
pr_cont(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
pr_cont(" %*s", field, "");
else
pr_cont(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
pr_cont("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
printk("Acx : %0*lx\n", field, regs->acx);
#endif
if (MIPS_ISA_REV < 6) {
printk("Hi : %0*lx\n", field, regs->hi);
printk("Lo : %0*lx\n", field, regs->lo);
}
/*
* Saved cp0 registers
*/
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
(void *) regs->cp0_epc);
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);
printk("Status: %08x ", (uint32_t) regs->cp0_status);
if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
pr_cont("KUo ");
if (regs->cp0_status & ST0_IEO)
pr_cont("IEo ");
if (regs->cp0_status & ST0_KUP)
pr_cont("KUp ");
if (regs->cp0_status & ST0_IEP)
pr_cont("IEp ");
if (regs->cp0_status & ST0_KUC)
pr_cont("KUc ");
if (regs->cp0_status & ST0_IEC)
pr_cont("IEc ");
} else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
pr_cont("KX ");
if (regs->cp0_status & ST0_SX)
pr_cont("SX ");
if (regs->cp0_status & ST0_UX)
pr_cont("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
pr_cont("USER ");
break;
case KSU_SUPERVISOR:
pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
pr_cont("KERNEL ");
break;
default:
pr_cont("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
pr_cont("ERL ");
if (regs->cp0_status & ST0_EXL)
pr_cont("EXL ");
if (regs->cp0_status & ST0_IE)
pr_cont("IE ");
}
pr_cont("\n");
exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
if (1 <= exccode && exccode <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
printk("PrId : %08x (%s)\n", read_c0_prid(),
cpu_name_string());
}
/*
* FIXME: really the generic show_regs should take a const pointer argument.
*/
void show_regs(struct pt_regs *regs)
{
__show_regs(regs);
dump_stack();
}
void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
__show_regs(regs);
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
current->comm, current->pid, current_thread_info(), current,
field, current_thread_info()->tp_value);
if (cpu_has_userlocal) {
unsigned long tls;
tls = read_c0_userlocal();
if (tls != current_thread_info()->tp_value)
printk("*HwTLS: %0*lx\n", field, tls);
}
show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
show_code((void *)regs->cp0_epc, user_mode(regs));
printk("\n");
}
static DEFINE_RAW_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
oops_enter();
if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
raw_spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
if (regs && kexec_should_crash(current))
crash_kexec(regs);
make_task_dead(sig);
}
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];
__asm__(
" .section __dbe_table, \"a\"\n"
" .previous \n");
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
const struct exception_table_entry *e;
e = search_extable(__start___dbe_table,
__stop___dbe_table - __start___dbe_table, addr);
if (!e)
e = search_module_dbetables(addr);
return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
const struct exception_table_entry *fixup = NULL;
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
enum ctx_state prev_state;
prev_state = exception_enter();
/* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
if (fixup)
action = MIPS_BE_FIXUP;
if (board_be_handler)
action = board_be_handler(regs, fixup != NULL);
else
mips_cm_error_report();
switch (action) {
case MIPS_BE_DISCARD:
goto out;
case MIPS_BE_FIXUP:
if (fixup) {
regs->cp0_epc = fixup->nextinsn;
goto out;
}
break;
default:
break;
}
/*
* Assume it would be too dangerous to continue ...
*/
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
SIGBUS) == NOTIFY_STOP)
goto out;
die_if_kernel("Oops", regs);
force_sig(SIGBUS);
out:
exception_exit(prev_state);
}
/*
* ll/sc, rdhwr, sync emulation
*/
#define OPCODE 0xfc000000
#define BASE 0x03e00000
#define RT 0x001f0000
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
#define SPEC0 0x00000000
#define SPEC3 0x7c000000
#define RD 0x0000f800
#define FUNC 0x0000003f
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
/* microMIPS definitions */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR 0x00006b3c
#define MM_RS 0x001f0000
#define MM_RT 0x03e00000
/*
* The ll_bit is cleared by r*_switch.S
*/
unsigned int ll_bit;
struct task_struct *ll_task;
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
unsigned long value, __user *vaddr;
long offset;
/*
	 * Analyse the ll instruction that just caused an RI exception
	 * and compute the referenced address into vaddr.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
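	/*
	 * Worked example: an immediate field of 0xfffc becomes 0xfffc0000
	 * after the left shift, and the arithmetic right shift then yields
	 * -4, i.e. the sign-extended 16-bit offset.
	 */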
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3)
return SIGBUS;
if (get_user(value, vaddr))
return SIGSEGV;
preempt_disable();
if (ll_task == NULL || ll_task == current) {
ll_bit = 1;
} else {
ll_bit = 0;
}
ll_task = current;
preempt_enable();
regs->regs[(opcode & RT) >> 16] = value;
return 0;
}
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
unsigned long __user *vaddr;
unsigned long reg;
long offset;
/*
	 * Analyse the sc instruction that just caused an RI exception
	 * and compute the referenced address into vaddr.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3)
return SIGBUS;
preempt_disable();
if (ll_bit == 0 || ll_task != current) {
regs->regs[reg] = 0;
preempt_enable();
return 0;
}
preempt_enable();
if (put_user(regs->regs[reg], vaddr))
return SIGSEGV;
regs->regs[reg] = 1;
return 0;
}
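/*
 * For illustration, the kind of userspace sequence these two emulators
 * exist to support is the classic ll/sc retry loop (MIPS assembly,
 * illustrative only):
 *
 *	1:	ll	$t0, 0($a0)	# load-linked, sets the link bit
 *		addiu	$t0, $t0, 1
 *		sc	$t0, 0($a0)	# store-conditional, $t0 := success
 *		beqz	$t0, 1b		# retry if the link was broken
 *
 * simulate_ll() records the "link" in ll_bit/ll_task, and simulate_sc()
 * succeeds only if no other task performed an ll in between.
 */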
/*
* ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
* opcodes are supposed to result in coprocessor unusable exceptions if
* executed on ll/sc-less processors. That's the theory. In practice a
* few processors such as NEC's VR4100 throw reserved instruction exceptions
* instead, so we're doing the emulation thing in both exception handlers.
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == LL) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return simulate_ll(regs, opcode);
}
if ((opcode & OPCODE) == SC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return simulate_sc(regs, opcode);
}
return -1; /* Must be something else ... */
}
/*
* Simulate trapping 'rdhwr' instructions to provide user accessible
* registers not implemented in hardware.
*/
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
struct thread_info *ti = task_thread_info(current);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
switch (rd) {
case MIPS_HWR_CPUNUM: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
case MIPS_HWR_SYNCISTEP: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
case MIPS_HWR_CC: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
case MIPS_HWR_CCRES: /* Count register resolution */
switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
regs->regs[rt] = 1;
break;
default:
regs->regs[rt] = 2;
}
return 0;
case MIPS_HWR_ULR: /* Read UserLocal register */
regs->regs[rt] = ti->tp_value;
return 0;
default:
return -1;
}
}
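/*
 * Typical trigger (illustrative): on CPUs without hardware HWREna.ULR
 * support, the standard TLS access sequence emitted by the toolchain,
 *
 *	rdhwr	$3, $29		# read UserLocal (HWR 29) into v1
 *
 * raises a Reserved Instruction exception and is emulated above via the
 * MIPS_HWR_ULR case.
 */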
static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
simulate_rdhwr(regs, rd, rt);
return 0;
}
/* Not ours. */
return -1;
}
static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
int rd = (opcode & MM_RS) >> 16;
int rt = (opcode & MM_RT) >> 21;
simulate_rdhwr(regs, rd, rt);
return 0;
}
/* Not ours. */
return -1;
}
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return 0;
}
return -1; /* Must be something else ... */
}
/*
* Loongson-3 CSR instructions emulation
*/
#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
#define LWC2 0xc8000000
#define RS BASE
#define CSR_OPCODE2 0x00000118
#define CSR_OPCODE2_MASK 0x000007ff
#define CSR_FUNC_MASK RT
#define CSR_FUNC_CPUCFG 0x8
static int simulate_loongson3_cpucfg(struct pt_regs *regs,
unsigned int opcode)
{
int op = opcode & OPCODE;
int op2 = opcode & CSR_OPCODE2_MASK;
int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
int rd = (opcode & RD) >> 11;
int rs = (opcode & RS) >> 21;
__u64 sel = regs->regs[rs];
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/* Do not emulate on unsupported core models. */
preempt_disable();
		if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
preempt_enable();
return -1;
}
regs->regs[rd] = loongson3_cpucfg_read_synthesized(
			&current_cpu_data, sel);
preempt_enable();
return 0;
}
/* Not ours. */
return -1;
}
#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
exception_exit(prev_state);
}
#ifdef CONFIG_MIPS_FP_SUPPORT
/*
* Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits. This is important as Inexact can
* happen together with Overflow or Underflow, and `ptrace' can set
* any bits.
*/
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
struct task_struct *tsk)
{
int si_code = FPE_FLTUNK;
if (fcr31 & FPU_CSR_INV_X)
si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
si_code = FPE_FLTDIV;
else if (fcr31 & FPU_CSR_OVF_X)
si_code = FPE_FLTOVF;
else if (fcr31 & FPU_CSR_UDF_X)
si_code = FPE_FLTUND;
else if (fcr31 & FPU_CSR_INE_X)
si_code = FPE_FLTRES;
force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
}
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
int si_code;
switch (sig) {
case 0:
return 0;
case SIGFPE:
force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
return 1;
case SIGSEGV:
mmap_read_lock(current->mm);
if (vma_lookup(current->mm, (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
mmap_read_unlock(current->mm);
force_sig_fault(SIGSEGV, si_code, fault_addr);
return 1;
default:
force_sig(sig);
return 1;
}
}
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
unsigned long old_epc, unsigned long old_ra)
{
union mips_instruction inst = { .word = opcode };
void __user *fault_addr;
unsigned long fcr31;
int sig;
/* If it's obviously not an FP instruction, skip it */
switch (inst.i_format.opcode) {
case cop1_op:
case cop1x_op:
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
break;
default:
return -1;
}
/*
* do_ri skipped over the instruction via compute_return_epc, undo
* that for the FPU emulator.
*/
regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
/*
* We can't allow the emulated instruction to leave any
* enabled Cause bits set in $fcr31.
*/
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
/* Send a signal if required. */
process_fpemu_return(sig, fault_addr, fcr31);
return 0;
}
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
enum ctx_state prev_state;
void __user *fault_addr;
int sig;
prev_state = exception_enter();
if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
SIGFPE) == NOTIFY_STOP)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
*
* Force FPU to dump state into task/thread context. We're
* moving a lot of data here for what is probably a single
* instruction, but the alternative is to pre-decode the FP
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
/*
* We can't allow the emulated instruction to leave any
* enabled Cause bits set in $fcr31.
*/
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
} else {
sig = SIGFPE;
fault_addr = (void __user *) regs->cp0_epc;
}
/* Send a signal if required. */
process_fpemu_return(sig, fault_addr, fcr31);
out:
exception_exit(prev_state);
}
/*
* MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
* emulated more than some threshold number of instructions, force migration to
* a "CPU" that has FP support.
*/
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
if (mt_fpemul_threshold > 0 &&
((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
/*
* If there's no FPU present, or if the application has already
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
cpumask_t tmask;
current->thread.user_cpus_allowed
= current->cpus_mask;
			cpumask_and(&tmask, &current->cpus_mask,
&mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
#else /* !CONFIG_MIPS_FP_SUPPORT */
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
unsigned long old_epc, unsigned long old_ra)
{
return -1;
}
#endif /* !CONFIG_MIPS_FP_SUPPORT */
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str)
{
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
SIGTRAP) == NOTIFY_STOP)
return;
/*
* A short test says that IRIX 5.3 sends SIGTRAP for all trap
* insns, even for trap and break codes that indicate arithmetic
* failures. Weird ...
* But should we continue the brokenness??? --macro
*/
switch (code) {
case BRK_OVERFLOW:
case BRK_DIVZERO:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
force_sig_fault(SIGFPE,
code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
(void __user *) regs->cp0_epc);
break;
case BRK_BUG:
die_if_kernel("Kernel bug detected", regs);
force_sig(SIGTRAP);
break;
case BRK_MEMU:
/*
* This breakpoint code is used by the FPU emulator to retake
* control of the CPU after executing the instruction from the
* delay slot of an emulated branch.
*
* Terminate if exception was recognized as a delay slot return
* otherwise handle as normal.
*/
if (do_dsemulret(regs))
return;
die_if_kernel("Math emu break/trap", regs);
force_sig(SIGTRAP);
break;
default:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
if (si_code) {
force_sig_fault(SIGTRAP, si_code, NULL);
} else {
force_sig(SIGTRAP);
}
}
}
asmlinkage void do_bp(struct pt_regs *regs)
{
unsigned long epc = msk_isa16_mode(exception_epc(regs));
unsigned int opcode, bcode;
enum ctx_state prev_state;
bool user = user_mode(regs);
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
u16 instr[2];
if (__get_inst16(&instr[0], (u16 *)epc, user))
goto out_sigsegv;
if (!cpu_has_mmips) {
/* MIPS16e mode */
bcode = (instr[0] >> 5) & 0x3f;
} else if (mm_insn_16bit(instr[0])) {
/* 16-bit microMIPS BREAK */
bcode = instr[0] & 0xf;
} else {
/* 32-bit microMIPS BREAK */
if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
goto out_sigsegv;
opcode = (instr[0] << 16) | instr[1];
bcode = (opcode >> 6) & ((1 << 20) - 1);
}
} else {
if (__get_inst32(&opcode, (u32 *)epc, user))
goto out_sigsegv;
bcode = (opcode >> 6) & ((1 << 20) - 1);
}
/*
	 * There is an ancient bug in MIPS assemblers: the break code is
	 * placed starting at bit 16 instead of bit 6 of the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic. --macro
*/
if (bcode >= (1 << 10))
bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
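	/*
	 * Worked example: "break 7" assembled with the buggy encoding
	 * places the code at bit 16, so the extraction above yields
	 * bcode = 7 << 10 = 0x1c00. That is >= 1 << 10, and swapping the
	 * two halves recovers bcode = 7.
	 */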
/*
* notify the kprobe handlers, if instruction is likely to
* pertain to them.
*/
switch (bcode) {
case BRK_UPROBE:
if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
case BRK_UPROBE_XOL:
if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
case BRK_KPROBE_BP:
if (notify_die(DIE_BREAK, "debug", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
default:
break;
}
do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
out:
exception_exit(prev_state);
return;
out_sigsegv:
force_sig(SIGSEGV);
goto out;
}
asmlinkage void do_tr(struct pt_regs *regs)
{
u32 opcode, tcode = 0;
enum ctx_state prev_state;
u16 instr[2];
bool user = user_mode(regs);
unsigned long epc = msk_isa16_mode(exception_epc(regs));
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
__get_inst16(&instr[1], (u16 *)(epc + 2), user))
goto out_sigsegv;
opcode = (instr[0] << 16) | instr[1];
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
tcode = (opcode >> 12) & ((1 << 4) - 1);
} else {
if (__get_inst32(&opcode, (u32 *)epc, user))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
tcode = (opcode >> 6) & ((1 << 10) - 1);
}
do_trap_or_bp(regs, tcode, 0, "Trap");
out:
exception_exit(prev_state);
return;
out_sigsegv:
force_sig(SIGSEGV);
goto out;
}
asmlinkage void do_ri(struct pt_regs *regs)
{
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc;
unsigned long old31 = regs->regs[31];
enum ctx_state prev_state;
unsigned int opcode = 0;
int status = -1;
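	/*
	 * Status protocol shared with the simulate_*() helpers below: a
	 * negative value means "not recognised, try the next emulator",
	 * 0 means successfully emulated, and a positive value is a signal
	 * number to deliver after undoing the EPC skip-over.
	 */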
/*
* Avoid any kernel code. Just emulate the R2 instruction
* as quickly as possible.
*/
if (mipsr2_emulation && cpu_has_mips_r6 &&
likely(user_mode(regs)) &&
likely(get_user(opcode, epc) >= 0)) {
unsigned long fcr31 = 0;
status = mipsr2_decoder(regs, opcode, &fcr31);
switch (status) {
case 0:
case SIGEMT:
return;
case SIGILL:
goto no_r2_instr;
default:
process_fpemu_return(status,
					&current->thread.cp0_baduaddr,
fcr31);
return;
}
}
no_r2_instr:
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
SIGILL) == NOTIFY_STOP)
goto out;
die_if_kernel("Reserved instruction in kernel code", regs);
if (unlikely(compute_return_epc(regs) < 0))
goto out;
if (!get_isa16_mode(regs->cp0_epc)) {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr_normal(regs, opcode);
if (status < 0)
status = simulate_sync(regs, opcode);
if (status < 0)
status = simulate_fp(regs, opcode, old_epc, old31);
#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
if (status < 0)
status = simulate_loongson3_cpucfg(regs, opcode);
#endif
} else if (cpu_has_mmips) {
unsigned short mmop[2] = { 0 };
if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
status = SIGSEGV;
if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
status = SIGSEGV;
opcode = mmop[0];
opcode = (opcode << 16) | mmop[1];
if (status < 0)
status = simulate_rdhwr_mm(regs, opcode);
}
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
force_sig(status);
}
out:
exception_exit(prev_state);
}
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(cu2_chain);
int __ref register_cu2_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&cu2_chain, nb);
}
int cu2_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&cu2_chain, val, v);
}
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
void *data)
{
struct pt_regs *regs = data;
die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
"instruction", regs);
force_sig(SIGILL);
return NOTIFY_OK;
}
#ifdef CONFIG_MIPS_FP_SUPPORT
static int enable_restore_fp_context(int msa)
{
int err, was_fpu_owner, prior_msa;
bool first_fp;
/* Initialize context if it hasn't been used already */
first_fp = init_fp_ctx(current);
if (first_fp) {
preempt_disable();
err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
/*
			 * With MSA enabled, userspace can see MSACSR
			 * and the MSA registers, but they still hold
			 * whatever the previous task left there, so
			 * restore them from the saved FP/MSA context.
*/
write_msa_csr(current->thread.fpu.msacsr);
/*
			 * own_fpu_inatomic(1) only restores the low
			 * 64 bits of each vector register; initialise
			 * the upper 64 bits here.
*/
init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
preempt_enable();
return err;
}
/*
* This task has formerly used the FP context.
*
* If this thread has no live MSA vector context then we can simply
* restore the scalar FP context. If it has live MSA vector context
* (that is, it has or may have used MSA since last performing a
* function call) then we'll need to restore the vector context. This
* applies even if we're currently only executing a scalar FP
* instruction. This is because if we were to later execute an MSA
* instruction then we'd either have to:
*
* - Restore the vector context & clobber any registers modified by
* scalar FP instructions between now & then.
*
* or
*
* - Not restore the vector context & lose the most significant bits
* of all vector registers.
*
* Neither of those options is acceptable. We cannot restore the least
* significant bits of the registers now & only restore the most
* significant bits later because the most significant bits of any
* vector registers whose aliased FP register is modified now will have
* been zeroed. We'd have no way to know that when restoring the vector
* context & thus may load an outdated value for the most significant
* bits of a vector register.
*/
if (!msa && !thread_msa_context_live())
return own_fpu(1);
/*
* This task is using or has previously used MSA. Thus we require
* that Status.FR == 1.
*/
preempt_disable();
was_fpu_owner = is_fpu_owner();
err = own_fpu_inatomic(0);
if (err)
goto out;
enable_msa();
write_msa_csr(current->thread.fpu.msacsr);
set_thread_flag(TIF_USEDMSA);
/*
* If this is the first time that the task is using MSA and it has
 * previously used scalar FP in this time slice then we already have
* FP context which we shouldn't clobber. We do however need to clear
* the upper 64b of each vector register so that this task has no
* opportunity to see data left behind by another.
*/
prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
if (!prior_msa && was_fpu_owner) {
init_msa_upper();
goto out;
}
if (!prior_msa) {
/*
* Restore the least significant 64b of each vector register
* from the existing scalar FP context.
*/
_restore_fp(current);
/*
* The task has not formerly used MSA, so clear the upper 64b
* of each vector register such that it cannot see data left
* behind by another task.
*/
init_msa_upper();
} else {
/* We need to restore the vector context. */
restore_msa(current);
/* Restore the scalar FP control & status register */
if (!was_fpu_owner)
write_32bit_cp1_register(CP1_STATUS,
current->thread.fpu.fcr31);
}
out:
preempt_enable();
return 0;
}
#else /* !CONFIG_MIPS_FP_SUPPORT */
static int enable_restore_fp_context(int msa)
{
return SIGILL;
}
#endif /* CONFIG_MIPS_FP_SUPPORT */
asmlinkage void do_cpu(struct pt_regs *regs)
{
enum ctx_state prev_state;
unsigned int __user *epc;
unsigned long old_epc, old31;
unsigned int opcode;
unsigned int cpid;
int status;
prev_state = exception_enter();
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
if (cpid != 2)
die_if_kernel("do_cpu invoked from kernel context!", regs);
switch (cpid) {
case 0:
epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc;
old31 = regs->regs[31];
opcode = 0;
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
break;
if (!get_isa16_mode(regs->cp0_epc)) {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
}
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
force_sig(status);
}
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
case 3:
/*
* The COP3 opcode space and consequently the CP0.Status.CU3
* bit and the CP0.Cause.CE=3 encoding have been removed as
* of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
* up the space has been reused for COP1X instructions, that
* are enabled by the CP0.Status.CU1 bit and consequently
* use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
* exceptions. Some FPU-less processors that implement one
* of these ISAs however use this code erroneously for COP1X
* instructions. Therefore we redirect this trap to the FP
* emulator too.
*/
if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
force_sig(SIGILL);
break;
}
fallthrough;
case 1: {
void __user *fault_addr;
unsigned long fcr31;
int err, sig;
err = enable_restore_fp_context(0);
if (raw_cpu_has_fpu && !err)
break;
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
/*
* We can't allow the emulated instruction to leave
* any enabled Cause bits set in $fcr31.
*/
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
mt_ase_fp_affinity();
break;
}
#else /* CONFIG_MIPS_FP_SUPPORT */
case 1:
case 3:
force_sig(SIGILL);
break;
#endif /* CONFIG_MIPS_FP_SUPPORT */
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
break;
}
exception_exit(prev_state);
}
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
enum ctx_state prev_state;
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
goto out;
/* Clear MSACSR.Cause before enabling interrupts */
write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
local_irq_enable();
die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
force_sig(SIGFPE);
out:
exception_exit(prev_state);
}
asmlinkage void do_msa(struct pt_regs *regs)
{
enum ctx_state prev_state;
int err;
prev_state = exception_enter();
if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
force_sig(SIGILL);
goto out;
}
die_if_kernel("do_msa invoked from kernel context!", regs);
err = enable_restore_fp_context(1);
if (err)
force_sig(SIGILL);
out:
exception_exit(prev_state);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
enum ctx_state prev_state;
prev_state = exception_enter();
force_sig(SIGILL);
exception_exit(prev_state);
}
/*
* Called with interrupts disabled.
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
enum ctx_state prev_state;
prev_state = exception_enter();
/*
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
*/
clear_c0_cause(CAUSEF_WP);
/*
* If the current thread has the watch registers loaded, save
* their values and send SIGTRAP. Otherwise another thread
* left the registers set, clear them and continue.
*/
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
} else {
mips_clear_watch_registers();
local_irq_enable();
}
exception_exit(prev_state);
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
prev_state = exception_enter();
show_regs(regs);
if (multi_match) {
dump_tlb_regs();
pr_info("\n");
dump_tlb_all();
}
show_code((void *)regs->cp0_epc, user_mode(regs));
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
*/
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
int subcode;
subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
>> VPECONTROL_EXCPT_SHIFT;
switch (subcode) {
case 0:
printk(KERN_DEBUG "Thread Underflow\n");
break;
case 1:
printk(KERN_DEBUG "Thread Overflow\n");
break;
case 2:
printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
break;
case 3:
printk(KERN_DEBUG "Gating Storage Exception\n");
break;
case 4:
printk(KERN_DEBUG "YIELD Scheduler Exception\n");
break;
case 5:
printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
break;
default:
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
subcode);
break;
}
die_if_kernel("MIPS MT Thread exception in kernel", regs);
force_sig(SIGILL);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
panic("Unexpected DSP exception");
force_sig(SIGILL);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
* Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a new, unknown CPU type or by another deadly
	 * hardware/software error.
*/
show_regs(regs);
panic("Caught reserved exception %ld - should not happen.",
(regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
l1parity = 0;
return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
l2parity = 0;
return 1;
}
__setup("nol2par", nol2parity);
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
*/
static inline __init void parity_protection_init(void)
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
if (mips_cm_revision() >= CM_REV_CM3) {
ulong gcr_ectl, cp0_ectl;
/*
* With CM3 systems we need to ensure that the L1 & L2
* parity enables are set to the same value, since this
* is presumed by the hardware engineers.
*
* If the user disabled either of L1 or L2 ECC checking,
* disable both.
*/
l1parity &= l2parity;
l2parity &= l1parity;
/* Probe L1 ECC support */
cp0_ectl = read_c0_ecc();
write_c0_ecc(cp0_ectl | ERRCTL_PE);
back_to_back_c0_hazard();
cp0_ectl = read_c0_ecc();
/* Probe L2 ECC support */
gcr_ectl = read_gcr_err_control();
if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
!(cp0_ectl & ERRCTL_PE)) {
/*
* One of L1 or L2 ECC checking isn't supported,
* so we cannot enable either.
*/
l1parity = l2parity = 0;
}
/* Configure L1 ECC checking */
if (l1parity)
cp0_ectl |= ERRCTL_PE;
else
cp0_ectl &= ~ERRCTL_PE;
write_c0_ecc(cp0_ectl);
back_to_back_c0_hazard();
WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
/* Configure L2 ECC checking */
if (l2parity)
gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
else
gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
write_gcr_err_control(gcr_ectl);
gcr_ectl = read_gcr_err_control();
gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
WARN_ON(!!gcr_ectl != l2parity);
pr_info("Cache parity protection %sabled\n",
l1parity ? "en" : "dis");
return;
}
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_QEMU_GENERIC:
case CPU_P6600:
{
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
errctl = read_c0_ecc();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
/* probe L1 parity support */
write_c0_ecc(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
l1parity_present = (read_c0_ecc() & ERRCTL_PE);
/* probe L2 parity support */
write_c0_ecc(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
if (l1parity_present && l2parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
if (l1parity ^ l2parity)
errctl |= ERRCTL_L2P;
} else if (l1parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
} else if (l2parity_present) {
if (l2parity)
errctl |= ERRCTL_L2P;
} else {
/* No parity available */
}
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
write_c0_ecc(errctl);
back_to_back_c0_hazard();
errctl = read_c0_ecc();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
printk(KERN_INFO "Cache parity protection %sabled\n",
(errctl & ERRCTL_PE) ? "en" : "dis");
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
printk(KERN_INFO "L2 cache parity protection %sabled\n",
(errctl & ERRCTL_L2P) ? "en" : "dis");
}
}
break;
case CPU_5KC:
case CPU_5KE:
case CPU_LOONGSON32:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
printk(KERN_INFO "Cache parity protection %sabled\n",
(read_c0_ecc() & 0x80000000) ? "en" : "dis");
break;
case CPU_20KC:
case CPU_25KF:
/* Clear the DE bit (bit 16) in the c0_status register. */
printk(KERN_INFO "Enable cache parity protection for "
"MIPS 20KC/25KF CPUs.\n");
clear_c0_status(ST0_DE);
break;
default:
break;
}
}
asmlinkage void cache_parity_error(void)
{
const int field = 2 * sizeof(unsigned long);
unsigned int reg_val;
/* For the moment, report the problem and hang. */
printk("Cache error exception:\n");
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);
printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
if ((cpu_has_mips_r2_r6) &&
((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<27) ? "ES " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
} else {
pr_err("Error bits: %s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
}
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
if (reg_val & (1<<22))
printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
if (reg_val & (1<<23))
printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif
panic("Can't handle the cache error!");
}
asmlinkage void do_ftlb(void)
{
const int field = 2 * sizeof(unsigned long);
unsigned int reg_val;
/* For the moment, report the problem and hang. */
if ((cpu_has_mips_r2_r6) &&
(((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
read_c0_ecc());
pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
pr_err("c0_cacheerr == %08x\n", reg_val);
if ((reg_val & 0xc0000000) == 0xc0000000) {
pr_err("Decoded c0_cacheerr: FTLB parity error\n");
} else {
pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
}
} else {
pr_err("FTLB error exception\n");
}
/* Just print the cacheerr bits for now */
cache_parity_error();
}
asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
{
u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
LOONGSON_DIAG1_EXCCODE_SHIFT;
enum ctx_state prev_state;
prev_state = exception_enter();
switch (exccode) {
case 0x08:
/* Undocumented exception, will trigger on certain
* also-undocumented instructions accessible from userspace.
* Processor state is not otherwise corrupted, but currently
* we don't know how to proceed. Maybe there is some
* undocumented control flag to enable the instructions?
*/
force_sig(SIGILL);
break;
default:
/* None of the other exceptions, documented or not, have
* further details given; none are encountered in the wild
* either. Panic in case some of them turn out to be fatal.
*/
show_regs(regs);
panic("Unhandled Loongson exception - GSCause = %08x", diag1);
}
exception_exit(prev_state);
}
/*
* SDBBP EJTAG debug exception handler.
* We skip the instruction and return to the next instruction.
*/
void ejtag_exception_handler(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned long depc, old_epc, old_ra;
unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
depc = read_c0_depc();
debug = read_c0_debug();
printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
if (debug & 0x80000000) {
/*
* In branch delay slot.
* We cheat a little bit here and use EPC to calculate the
* debug return address (DEPC). EPC is restored after the
* calculation.
*/
old_epc = regs->cp0_epc;
old_ra = regs->regs[31];
regs->cp0_epc = depc;
compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
} else
depc += 4;
write_c0_depc(depc);
#if 0
printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
write_c0_debug(debug | 0x100);
#endif
}
/*
* NMI exception handler.
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(nmi_chain);
int register_nmi_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&nmi_chain, nb);
}
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
char str[100];
nmi_enter();
raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
smp_processor_id(), regs->cp0_epc);
regs->cp0_epc = read_c0_errorepc();
die(str, regs);
nmi_exit();
}
unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
memblock_reserve(addr, size);
}
void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
unsigned long old_handler;
#ifdef CONFIG_CPU_MICROMIPS
/*
* Only the TLB handlers are cache aligned with an even
* address. All other handlers are on an odd address and
* require no modification. Otherwise, MIPS32 mode will
* be entered when handling any TLB exceptions. That
* would be bad...since we must stay in microMIPS mode.
*/
if (!(handler & 0x1))
handler |= 1;
#endif
old_handler = xchg(&exception_handlers[n], handler);
if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
unsigned long jump_mask = ~((1 << 27) - 1);
#else
unsigned long jump_mask = ~((1 << 28) - 1);
#endif
u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26;
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
uasm_i_j(&buf, handler & ~jump_mask);
uasm_i_nop(&buf);
} else {
UASM_i_LA(&buf, k0, handler);
uasm_i_jr(&buf, k0);
uasm_i_nop(&buf);
}
local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
}
return (void *)old_handler;
}
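/*
 * Note on the patching above: a MIPS "j" instruction can only reach a
 * target within the same naturally-aligned 256 MB region as the
 * instruction following it (128 MB on microMIPS, whose jump field counts
 * halfwords), which is what the jump_mask comparison verifies.
 * Illustrative example: with ebase + 0x200 at 0x80000200 and a handler
 * at 0x80105000, both share the top four address bits, so a direct
 * "j handler" fits; otherwise the address is loaded into k0 and reached
 * with "jr".
 */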
static void do_default_vi(void)
{
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
u16 *h;
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
} else
handler = (unsigned long) addr;
vi_handlers[n] = handler;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
if (srs >= srssets)
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
board_bind_eic_interrupt(n, srs);
} else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (srssets > 1)
change_c0_srsmap(0xf << n*4, srs << n*4);
}
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
* that does normal register saving and standard interrupt exit
*/
extern const u8 except_vec_vi[], except_vec_vi_lui[];
extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
extern const u8 rollback_except_vec_vi[];
const u8 *vec_start = using_rollback_handler() ?
rollback_except_vec_vi : except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int lui_offset = except_vec_vi_lui - vec_start + 2;
const int ori_offset = except_vec_vi_ori - vec_start + 2;
#else
const int lui_offset = except_vec_vi_lui - vec_start;
const int ori_offset = except_vec_vi_ori - vec_start;
#endif
const int handler_len = except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) {
/*
			 * Sigh... panicking won't help as the console
* is probably not configured :(
*/
panic("VECTORSPACING too small");
}
set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
(handler_len - 1));
#else
handler_len);
#endif
h = (u16 *)(b + lui_offset);
*h = (handler >> 16) & 0xffff;
h = (u16 *)(b + ori_offset);
*h = (handler & 0xffff);
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
else {
/*
* In other cases jump directly to the interrupt handler. It
* is the handler's responsibility to save registers if required
* (eg hi/lo) and return from the exception using "eret".
*/
u32 insn;
h = (u16 *)b;
/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
h[0] = (insn >> 16) & 0xffff;
h[1] = insn & 0xffff;
h[2] = 0;
h[3] = 0;
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8));
}
return (void *)old_handler;
}
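/*
 * Illustrative example of the immediate patching above: for a handler at,
 * say, 0x80123456 (hypothetical address), the copied stub's lui gets the
 * immediate 0x8012 and the ori gets 0x3456, rebuilding the full 32-bit
 * address in a register before jumping to it. On big-endian or microMIPS
 * the 16-bit immediate sits two bytes into the instruction, hence the +2
 * offsets. The direct "j" encoding used in the non-zero-SRS path instead
 * packs (handler & 0x0fffffff) >> 2 into the 26-bit jump field.
 */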
void *set_vi_handler(int n, vi_handler_t addr)
{
return set_vi_srs_handler(n, addr, 0);
}
extern void tlb_init(void);
/*
* Timer interrupt
*/
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;
/*
* Performance counter IRQ or -1 if shared with timer
*/
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
/*
* Fast debug channel IRQ or -1 if not present
*/
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);
static int noulri;
static int __init ulri_disable(char *s)
{
pr_info("Disabling ulri\n");
noulri = 1;
return 1;
}
__setup("noulri", ulri_disable);
/* configure STATUS register */
static void configure_status(void)
{
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
unsigned int status_set = ST0_KERNEL_CUMASK;
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
back_to_back_c0_hazard();
}
unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);
/* configure HWRENA register */
static void configure_hwrena(void)
{
hwrena = cpu_hwrena_impl_bits;
if (cpu_has_mips_r2_r6)
hwrena |= MIPS_HWRENA_CPUNUM |
MIPS_HWRENA_SYNCISTEP |
MIPS_HWRENA_CC |
MIPS_HWRENA_CCRES;
if (!noulri && cpu_has_userlocal)
hwrena |= MIPS_HWRENA_ULR;
if (hwrena)
write_c0_hwrena(hwrena);
}
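/*
 * Each HWREna bit set here lets userspace execute the corresponding
 * rdhwr directly in hardware; registers left disabled trap with a
 * Reserved Instruction exception and fall back to simulate_rdhwr().
 */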
static void configure_exception_vector(void)
{
if (cpu_has_mips_r2_r6) {
unsigned long sr = set_c0_status(ST0_BEV);
/* If available, use WG to set top bits of EBASE */
if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
}
write_c0_ebase(ebase);
write_c0_status(sr);
}
if (cpu_has_veic || cpu_has_vint) {
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
if (cpu_has_divec) {
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
set_c0_cause(CAUSEF_IV);
evpe(vpflags);
} else
set_c0_cause(CAUSEF_IV);
}
}
void per_cpu_trap_init(bool is_boot_cpu)
{
unsigned int cpu = smp_processor_id();
configure_status();
configure_hwrena();
configure_exception_vector();
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
*
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
* o read IntCtl.IPFDC to determine the fast debug channel interrupt
*/
if (cpu_has_mips_r2_r6) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
if (!cp0_fdc_irq)
cp0_fdc_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
cp0_perfcount_irq = -1;
cp0_fdc_irq = -1;
}
if (cpu_has_mmid)
cpu_data[cpu].asid_cache = 0;
else if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = asid_first_version(cpu);
mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
/* Boot CPU's cache setup in setup_arch(). */
if (!is_boot_cpu)
cpu_cache_init();
tlb_init();
TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
void set_handler(unsigned long offset, const void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
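	/*
	 * With microMIPS, handler symbol addresses carry the ISA mode bit
	 * (bit 0) set; subtract it to memcpy from the real start of the code.
	 */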
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
memcpy((void *)(ebase + offset), addr, size);
#endif
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
static const char panic_null_cerr[] =
"Trying to set NULL cache error exception handler\n";
/*
* Install uncached CPU exception handler.
* This is suitable only for the cache error exception which is the only
* exception handler that is being run uncached.
*/
void set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
unsigned long uncached_ebase = CKSEG1ADDR(ebase);
if (!addr)
panic(panic_null_cerr);
memcpy((void *)(uncached_ebase + offset), addr, size);
}
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
rdhwr_noopt = 1;
return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
extern char except_vec3_generic;
extern char except_vec4;
extern char except_vec3_r4000;
unsigned long i, vec_size;
phys_addr_t ebase_pa;
check_wait();
if (!cpu_has_mips_r2_r6) {
ebase = CAC_BASE;
vec_size = 0x400;
} else {
if (cpu_has_veic || cpu_has_vint)
vec_size = 0x200 + VECTORSPACING*64;
else
vec_size = PAGE_SIZE;
ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
if (!ebase_pa)
panic("%s: Failed to allocate %lu bytes align=0x%x\n",
__func__, vec_size, 1 << fls(vec_size));
/*
* Try to ensure ebase resides in KSeg0 if possible.
*
* It shouldn't generally be in XKPhys on MIPS64 to avoid
* hitting a poorly defined exception base for Cache Errors.
* The allocation is likely to be in the low 512MB of physical,
* in which case we should be able to convert to KSeg0.
*
* EVA is special though as it allows segments to be rearranged
* and to become uncached during cache error handling.
*/
if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
ebase = CKSEG0ADDR(ebase_pa);
else
ebase = (unsigned long)phys_to_virt(ebase_pa);
}
if (cpu_has_mmips) {
unsigned int config3 = read_c0_config3();
if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
else
write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
}
if (board_ebase_setup)
board_ebase_setup();
per_cpu_trap_init(true);
memblock_set_bottom_up(false);
/*
* Copy the generic exception handlers to their final destination.
* This will be overridden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
*/
for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/*
	 * Copy the EJTAG debug exception vector handler code to its final
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
*/
if (cpu_has_watch)
set_except_vector(EXCCODE_WATCH, handle_watch);
/*
* Initialise interrupt handlers
*/
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler(i, NULL);
}
else if (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
*/
parity_protection_init();
/*
* The Data Bus Errors / Instruction Bus Errors are signaled
* by external hardware. Therefore these two exceptions
* may have board specific handlers.
*/
if (board_be_init)
board_be_init();
set_except_vector(EXCCODE_INT, using_rollback_handler() ?
rollback_handle_int : handle_int);
set_except_vector(EXCCODE_MOD, handle_tlbm);
set_except_vector(EXCCODE_TLBL, handle_tlbl);
set_except_vector(EXCCODE_TLBS, handle_tlbs);
set_except_vector(EXCCODE_ADEL, handle_adel);
set_except_vector(EXCCODE_ADES, handle_ades);
set_except_vector(EXCCODE_IBE, handle_ibe);
set_except_vector(EXCCODE_DBE, handle_dbe);
set_except_vector(EXCCODE_SYS, handle_sys);
set_except_vector(EXCCODE_BP, handle_bp);
if (rdhwr_noopt)
set_except_vector(EXCCODE_RI, handle_ri);
else {
if (cpu_has_vtag_icache)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else if (current_cpu_type() == CPU_LOONGSON64)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else
set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
}
set_except_vector(EXCCODE_CPU, handle_cpu);
set_except_vector(EXCCODE_OV, handle_ov);
set_except_vector(EXCCODE_TR, handle_tr);
set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(EXCCODE_FPE, handle_fpe);
if (cpu_has_ftlbparex)
set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
if (cpu_has_gsexcex)
set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);
if (cpu_has_rixiex) {
set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
}
set_except_vector(EXCCODE_MSADIS, handle_msa);
set_except_vector(EXCCODE_MDMX, handle_mdmx);
if (cpu_has_mcheck)
set_except_vector(EXCCODE_MCHECK, handle_mcheck);
if (cpu_has_mipsmt)
set_except_vector(EXCCODE_THREAD, handle_mt);
set_except_vector(EXCCODE_DSPDIS, handle_dsp);
if (board_cache_error_setup)
board_cache_error_setup();
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
set_handler(0x180, &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
set_handler(0x180, &except_vec3_generic, 0x80);
else
set_handler(0x080, &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + vec_size);
sort_extable(__start___dbe_table, __stop___dbe_table);
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
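/*
 * CP0 trap state (Status, HWREna, exception vector base) may be lost
 * across a power-gated idle state, so reprogram it whenever a CPU
 * leaves (or fails to enter) CPU_PM.
 */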
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
switch (cmd) {
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
configure_status();
configure_hwrena();
configure_exception_vector();
/* Restore register with CPU number for TLB handlers */
TLBMISS_HANDLER_RESTORE();
break;
}
return NOTIFY_OK;
}
static struct notifier_block trap_pm_notifier_block = {
.notifier_call = trap_pm_notifier,
};
static int __init trap_pm_init(void)
{
return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);
| linux-master | arch/mips/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* asm-offsets.c: Calculate pt_regs and task_struct offsets.
*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*
* Kevin Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 2000 MIPS Technologies, Inc.
*/
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/cpu-info.h>
#include <asm/pm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/smp-cps.h>
#include <linux/kvm_host.h>
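/*
 * Each output_*_defines() function below emits assembler-visible constants
 * (via kbuild's OFFSET/DEFINE macros) that asm code uses to index C structs.
 */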
void output_ptreg_defines(void);
void output_ptreg_defines(void)
{
COMMENT("MIPS pt_regs offsets.");
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);
OFFSET(PT_R3, pt_regs, regs[3]);
OFFSET(PT_R4, pt_regs, regs[4]);
OFFSET(PT_R5, pt_regs, regs[5]);
OFFSET(PT_R6, pt_regs, regs[6]);
OFFSET(PT_R7, pt_regs, regs[7]);
OFFSET(PT_R8, pt_regs, regs[8]);
OFFSET(PT_R9, pt_regs, regs[9]);
OFFSET(PT_R10, pt_regs, regs[10]);
OFFSET(PT_R11, pt_regs, regs[11]);
OFFSET(PT_R12, pt_regs, regs[12]);
OFFSET(PT_R13, pt_regs, regs[13]);
OFFSET(PT_R14, pt_regs, regs[14]);
OFFSET(PT_R15, pt_regs, regs[15]);
OFFSET(PT_R16, pt_regs, regs[16]);
OFFSET(PT_R17, pt_regs, regs[17]);
OFFSET(PT_R18, pt_regs, regs[18]);
OFFSET(PT_R19, pt_regs, regs[19]);
OFFSET(PT_R20, pt_regs, regs[20]);
OFFSET(PT_R21, pt_regs, regs[21]);
OFFSET(PT_R22, pt_regs, regs[22]);
OFFSET(PT_R23, pt_regs, regs[23]);
OFFSET(PT_R24, pt_regs, regs[24]);
OFFSET(PT_R25, pt_regs, regs[25]);
OFFSET(PT_R26, pt_regs, regs[26]);
OFFSET(PT_R27, pt_regs, regs[27]);
OFFSET(PT_R28, pt_regs, regs[28]);
OFFSET(PT_R29, pt_regs, regs[29]);
OFFSET(PT_R30, pt_regs, regs[30]);
OFFSET(PT_R31, pt_regs, regs[31]);
OFFSET(PT_LO, pt_regs, lo);
OFFSET(PT_HI, pt_regs, hi);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
OFFSET(PT_ACX, pt_regs, acx);
#endif
OFFSET(PT_EPC, pt_regs, cp0_epc);
OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
OFFSET(PT_STATUS, pt_regs, cp0_status);
OFFSET(PT_CAUSE, pt_regs, cp0_cause);
#ifdef CONFIG_CPU_CAVIUM_OCTEON
OFFSET(PT_MPL, pt_regs, mpl);
OFFSET(PT_MTP, pt_regs, mtp);
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
DEFINE(PT_SIZE, sizeof(struct pt_regs));
BLANK();
}
void output_task_defines(void);
void output_task_defines(void)
{
COMMENT("MIPS task_struct offsets.");
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
#if defined(CONFIG_STACKPROTECTOR)
OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
#endif
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}
void output_thread_info_defines(void);
void output_thread_info_defines(void)
{
COMMENT("MIPS thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}
void output_thread_defines(void);
void output_thread_defines(void)
{
COMMENT("MIPS specific thread_struct offsets.");
OFFSET(THREAD_REG16, task_struct, thread.reg16);
OFFSET(THREAD_REG17, task_struct, thread.reg17);
OFFSET(THREAD_REG18, task_struct, thread.reg18);
OFFSET(THREAD_REG19, task_struct, thread.reg19);
OFFSET(THREAD_REG20, task_struct, thread.reg20);
OFFSET(THREAD_REG21, task_struct, thread.reg21);
OFFSET(THREAD_REG22, task_struct, thread.reg22);
OFFSET(THREAD_REG23, task_struct, thread.reg23);
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_REG30, task_struct, thread.reg30);
OFFSET(THREAD_REG31, task_struct, thread.reg31);
OFFSET(THREAD_STATUS, task_struct,
thread.cp0_status);
OFFSET(THREAD_BVADDR, task_struct, \
thread.cp0_badvaddr);
OFFSET(THREAD_BUADDR, task_struct, \
thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
BLANK();
}
#ifdef CONFIG_MIPS_FP_SUPPORT
void output_thread_fpu_defines(void);
void output_thread_fpu_defines(void)
{
OFFSET(THREAD_FPU, task_struct, thread.fpu);
OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
}
#endif
void output_mm_defines(void);
void output_mm_defines(void)
{
COMMENT("Size of struct page");
DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
BLANK();
COMMENT("Linux mm_struct offsets.");
OFFSET(MM_USERS, mm_struct, mm_users);
OFFSET(MM_PGD, mm_struct, pgd);
OFFSET(MM_CONTEXT, mm_struct, context);
BLANK();
DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
BLANK();
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
DEFINE(_PAGE_SIZE, PAGE_SIZE);
BLANK();
}
#ifdef CONFIG_32BIT
void output_sc_defines(void);
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
OFFSET(SC_ACX, sigcontext, sc_acx);
OFFSET(SC_MDHI, sigcontext, sc_mdhi);
OFFSET(SC_MDLO, sigcontext, sc_mdlo);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
OFFSET(SC_HI1, sigcontext, sc_hi1);
OFFSET(SC_LO1, sigcontext, sc_lo1);
OFFSET(SC_HI2, sigcontext, sc_hi2);
OFFSET(SC_LO2, sigcontext, sc_lo2);
OFFSET(SC_HI3, sigcontext, sc_hi3);
OFFSET(SC_LO3, sigcontext, sc_lo3);
BLANK();
}
#endif
#ifdef CONFIG_64BIT
void output_sc_defines(void);
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
OFFSET(SC_MDHI, sigcontext, sc_mdhi);
OFFSET(SC_MDLO, sigcontext, sc_mdlo);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
BLANK();
}
#endif
void output_signal_defined(void);
void output_signal_defined(void)
{
COMMENT("Linux signal numbers.");
DEFINE(_SIGHUP, SIGHUP);
DEFINE(_SIGINT, SIGINT);
DEFINE(_SIGQUIT, SIGQUIT);
DEFINE(_SIGILL, SIGILL);
DEFINE(_SIGTRAP, SIGTRAP);
DEFINE(_SIGIOT, SIGIOT);
DEFINE(_SIGABRT, SIGABRT);
DEFINE(_SIGEMT, SIGEMT);
DEFINE(_SIGFPE, SIGFPE);
DEFINE(_SIGKILL, SIGKILL);
DEFINE(_SIGBUS, SIGBUS);
DEFINE(_SIGSEGV, SIGSEGV);
DEFINE(_SIGSYS, SIGSYS);
DEFINE(_SIGPIPE, SIGPIPE);
DEFINE(_SIGALRM, SIGALRM);
DEFINE(_SIGTERM, SIGTERM);
DEFINE(_SIGUSR1, SIGUSR1);
DEFINE(_SIGUSR2, SIGUSR2);
DEFINE(_SIGCHLD, SIGCHLD);
DEFINE(_SIGPWR, SIGPWR);
DEFINE(_SIGWINCH, SIGWINCH);
DEFINE(_SIGURG, SIGURG);
DEFINE(_SIGIO, SIGIO);
DEFINE(_SIGSTOP, SIGSTOP);
DEFINE(_SIGTSTP, SIGTSTP);
DEFINE(_SIGCONT, SIGCONT);
DEFINE(_SIGTTIN, SIGTTIN);
DEFINE(_SIGTTOU, SIGTTOU);
DEFINE(_SIGVTALRM, SIGVTALRM);
DEFINE(_SIGPROF, SIGPROF);
DEFINE(_SIGXCPU, SIGXCPU);
DEFINE(_SIGXFSZ, SIGXFSZ);
BLANK();
}
#ifdef CONFIG_CPU_CAVIUM_OCTEON
void output_octeon_cop2_state_defines(void);
void output_octeon_cop2_state_defines(void)
{
COMMENT("Octeon specific octeon_cop2_state offsets.");
OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3);
OFFSET(THREAD_CP2, task_struct, thread.cp2);
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
#endif
BLANK();
}
#endif
#ifdef CONFIG_HIBERNATION
void output_pbe_defines(void);
void output_pbe_defines(void)
{
COMMENT(" Linux struct pbe offsets. ");
OFFSET(PBE_ADDRESS, pbe, address);
OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
OFFSET(PBE_NEXT, pbe, next);
DEFINE(PBE_SIZE, sizeof(struct pbe));
BLANK();
}
#endif
#ifdef CONFIG_CPU_PM
void output_pm_defines(void);
void output_pm_defines(void)
{
COMMENT(" PM offsets. ");
#ifdef CONFIG_EVA
OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
#endif
OFFSET(SSS_SP, mips_static_suspend_state, sp);
BLANK();
}
#endif
#ifdef CONFIG_MIPS_FP_SUPPORT
void output_kvm_defines(void);
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specific offsets. ");
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
BLANK();
}
#endif
#ifdef CONFIG_MIPS_CPS
void output_cps_defines(void);
void output_cps_defines(void)
{
COMMENT(" MIPS CPS offsets. ");
OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
}
#endif
| linux-master | arch/mips/kernel/asm-offsets.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Ross Biro
* Copyright (C) Linus Torvalds
* Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
* Copyright (C) 1996 David S. Miller
* Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 1999 MIPS Technologies, Inc.
* Copyright (C) 2000 Ulf Carlsson
*
* At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
* binaries.
*/
#include <linux/compiler.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/page.h>
#include <asm/reg.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
/*
* Tracing a 32-bit process with a 64-bit strace and vice versa will not
* work. I don't know how to fix this.
*/
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
int addr = caddr;
int data = cdata;
int ret;
switch (request) {
/*
	 * Read 4 bytes of the other process' storage.
	 *  data is a pointer specifying where the user wants the
	 *	4 bytes copied to
	 *  addr is a pointer in the user's storage that contains an 8 byte
	 *	address in the other process of the 4 bytes that are to be read
	 * (this is run in a 32-bit process looking at a 64-bit process)
	 * When I and D space are separate, these will need to be fixed.
*/
case PTRACE_PEEKTEXT_3264:
case PTRACE_PEEKDATA_3264: {
u32 tmp;
int copied;
u32 __user * addrOthers;
ret = -EIO;
/* Get the addr in the other process that we want to read */
if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
break;
copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *) (unsigned long) data);
break;
}
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
unsigned int tmp;
regs = task_pt_regs(child);
ret = 0; /* Default return value. */
switch (addr) {
case 0 ... 31:
tmp = regs->regs[addr];
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs;
if (!tsk_used_math(child)) {
/* FP not yet used */
tmp = -1;
break;
}
fregs = get_fpu_regs(child);
if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers.
*/
tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1);
break;
}
tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
}
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
case FPC_EIR:
/* implementation / version register */
tmp = boot_cpu_data.fpu_id;
break;
#endif /* CONFIG_MIPS_FP_SUPPORT */
case PC:
tmp = regs->cp0_epc;
break;
case CAUSE:
tmp = regs->cp0_cause;
break;
case BADVADDR:
tmp = regs->cp0_badvaddr;
break;
case MMHI:
tmp = regs->hi;
break;
case MMLO:
tmp = regs->lo;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
dregs = __get_dsp_regs(child);
tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
tmp = child->thread.dsp.dspcontrol;
break;
default:
tmp = 0;
ret = -EIO;
goto out;
}
ret = put_user(tmp, (unsigned __user *) (unsigned long) data);
break;
}
/*
	 * Write 4 bytes into the other process' storage.
	 *  data is the 4 bytes that the user wants written
	 *  addr is a pointer in the user's storage that contains an
	 *	8 byte address in the other process of the location where
	 *	the 4 bytes are to be written
	 * (this is run in a 32-bit process looking at a 64-bit process)
	 * When I and D space are separate, these will need to be fixed.
*/
case PTRACE_POKETEXT_3264:
case PTRACE_POKEDATA_3264: {
u32 __user * addrOthers;
/* Get the addr in the other process that we want to write into */
ret = -EIO;
if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
break;
ret = 0;
if (ptrace_access_vm(child, (u64)addrOthers, &data,
sizeof(data),
FOLL_FORCE | FOLL_WRITE) == sizeof(data))
break;
ret = -EIO;
break;
}
case PTRACE_POKEUSR: {
struct pt_regs *regs;
ret = 0;
regs = task_pt_regs(child);
switch (addr) {
case 0 ... 31:
regs->regs[addr] = data;
/* System call number may have been changed */
if (addr == 2)
mips_syscall_update_nr(child, regs);
else if (addr == 4 &&
mips_syscall_is_indirect(child, regs))
mips_syscall_update_nr(child, regs);
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
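			/*
			 * First FP use by the child: fill the FPU state
			 * with all-ones and start with a zeroed FCSR.
			 */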
if (!tsk_used_math(child)) {
/* FP not yet used */
memset(&child->thread.fpu, ~0,
sizeof(child->thread.fpu));
child->thread.fpu.fcr31 = 0;
}
if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers.
*/
set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1, data);
break;
}
set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case FPC_CSR:
child->thread.fpu.fcr31 = data;
break;
#endif /* CONFIG_MIPS_FP_SUPPORT */
case PC:
regs->cp0_epc = data;
break;
case MMHI:
regs->hi = data;
break;
case MMLO:
regs->lo = data;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
dregs = __get_dsp_regs(child);
dregs[addr - DSP_BASE] = data;
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
child->thread.dsp.dspcontrol = data;
break;
default:
/* The rest are not allowed. */
ret = -EIO;
break;
}
break;
}
case PTRACE_GETREGS:
ret = ptrace_getregs(child,
(struct user_pt_regs __user *) (__u64) data);
break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child,
(struct user_pt_regs __user *) (__u64) data);
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
break;
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned int __user *) (unsigned long) data);
break;
case PTRACE_GET_THREAD_AREA_3264:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned long __user *) (unsigned long) data);
break;
case PTRACE_GET_WATCH_REGS:
ret = ptrace_get_watch_regs(child,
(struct pt_watch_regs __user *) (unsigned long) addr);
break;
case PTRACE_SET_WATCH_REGS:
ret = ptrace_set_watch_regs(child,
(struct pt_watch_regs __user *) (unsigned long) addr);
break;
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
}
out:
return ret;
}
| linux-master | arch/mips/kernel/ptrace32.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int i, ret;
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
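		/* CPU 0 is the boot CPU and is not hotpluggable */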
c->hotpluggable = !!i;
ret = register_cpu(c, i);
if (ret)
			pr_warn("topology_init: register_cpu %d failed (%d)\n",
				i, ret);
}
return 0;
}
subsys_initcall(topology_init);
| linux-master | arch/mips/kernel/topology.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
static DECLARE_BITMAP(core_power, NR_CPUS);
struct core_boot_config *mips_cps_core_bootcfg;
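/* Number of VPs/VPEs to bring up in a core, capped by the SMT thread limit */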
static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
return min(smp_max_threads, mips_cps_numvps(cluster, core));
}
static void __init cps_smp_setup(void)
{
unsigned int nclusters, ncores, nvpes, core_vpes;
unsigned long core_entry;
int cl, c, v;
/* Detect & record VPE topology */
nvpes = 0;
nclusters = mips_cps_numclusters();
pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
for (cl = 0; cl < nclusters; cl++) {
if (cl > 0)
pr_cont(",");
pr_cont("{");
ncores = mips_cps_numcores(cl);
for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(cl, c);
if (c > 0)
pr_cont(",");
pr_cont("%u", core_vpes);
/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
if (!cl && !c)
smp_num_siblings = core_vpes;
for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
cpu_set_cluster(&cpu_data[nvpes + v], cl);
cpu_set_core(&cpu_data[nvpes + v], c);
cpu_set_vpe_id(&cpu_data[nvpes + v], v);
}
nvpes += core_vpes;
}
pr_cont("}");
}
pr_cont(" total %u\n", nvpes);
/* Indicate present CPUs (CPU being synonymous with VPE) */
for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
__cpu_number_map[v] = v;
__cpu_logical_map[v] = v;
}
/* Set a coherent default CCA (CWB) */
change_c0_config(CONF_CM_CMASK, 0x5);
/* Core 0 is powered up (we're running on it) */
bitmap_set(core_power, 0, 1);
/* Initialise core 0 */
mips_cps_core_init();
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
if (mips_cm_revision() >= CM_REV_CM3) {
core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
write_gcr_bev_base(core_entry);
}
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
unsigned ncores, core_vpes, c, cca;
bool cca_unsuitable, cores_limited;
u32 *entry_code;
mips_mt_set_cpuoptions();
/* Detect whether the CCA is unsuited to multi-core SMP */
cca = read_c0_config() & CONF_CM_CMASK;
switch (cca) {
case 0x4: /* CWBE */
case 0x5: /* CWB */
/* The CCA is coherent, multi-core is fine */
cca_unsuitable = false;
break;
default:
/* CCA is not coherent, multi-core is not usable */
cca_unsuitable = true;
}
/* Warn the user if the CCA prevents multi-core */
cores_limited = false;
if (cca_unsuitable || cpu_has_dc_aliases) {
for_each_present_cpu(c) {
if (cpus_are_siblings(smp_processor_id(), c))
continue;
set_cpu_present(c, false);
cores_limited = true;
}
}
if (cores_limited)
pr_warn("Using only one core due to %s%s%s\n",
cca_unsuitable ? "unsuitable CCA" : "",
(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
cpu_has_dc_aliases ? "dcache aliasing" : "");
/*
* Patch the start of mips_cps_core_entry to provide:
*
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
uasm_i_addiu(&entry_code, 16, 0, cca);
UASM_i_LA(&entry_code, 17, (long)mips_gcr_base);
BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end);
blast_dcache_range((unsigned long)&mips_cps_core_entry,
(unsigned long)entry_code);
bc_wback_inv((unsigned long)&mips_cps_core_entry,
(void *)entry_code - (void *)&mips_cps_core_entry);
__sync();
/* Allocate core boot configuration structs */
ncores = mips_cps_numcores(0);
mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
GFP_KERNEL);
if (!mips_cps_core_bootcfg) {
pr_err("Failed to allocate boot config for %u cores\n", ncores);
goto err_out;
}
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(0, c);
mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
sizeof(*mips_cps_core_bootcfg[c].vpe_config),
GFP_KERNEL);
if (!mips_cps_core_bootcfg[c].vpe_config) {
pr_err("Failed to allocate %u VPE boot configs\n",
core_vpes);
goto err_out;
}
}
/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));
return;
err_out:
/* Clean up allocations */
if (mips_cps_core_bootcfg) {
for (c = 0; c < ncores; c++)
kfree(mips_cps_core_bootcfg[c].vpe_config);
kfree(mips_cps_core_bootcfg);
mips_cps_core_bootcfg = NULL;
}
/* Effectively disable SMP by declaring CPUs not present */
for_each_possible_cpu(c) {
if (c == 0)
continue;
set_cpu_present(c, false);
}
}
static void boot_core(unsigned int core, unsigned int vpe_id)
{
u32 stat, seq_state;
unsigned timeout;
/* Select the appropriate core */
mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
/* Ensure its coherency is disabled */
write_gcr_co_coherence(0);
/* Start it with the legacy memory map and exception base */
write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
/* Ensure the core can access the GCRs */
set_gcr_access(1 << core);
if (mips_cpc_present()) {
/* Reset the core */
mips_cpc_lock_other(core);
if (mips_cm_revision() >= CM_REV_CM3) {
/* Run only the requested VP following the reset */
write_cpc_co_vp_stop(0xf);
write_cpc_co_vp_run(1 << vpe_id);
/*
* Ensure that the VP_RUN register is written before the
* core leaves reset.
*/
wmb();
}
write_cpc_co_cmd(CPC_Cx_CMD_RESET);
timeout = 100;
while (true) {
stat = read_cpc_co_stat_conf();
seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
/* U6 == coherent execution, ie. the core is up */
if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
break;
/* Delay a little while before we start warning */
if (timeout) {
timeout--;
mdelay(10);
continue;
}
pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
core, stat);
mdelay(1000);
}
mips_cpc_unlock_other();
} else {
/* Take the core out of reset */
write_gcr_co_reset_release(0);
}
mips_cm_unlock_other();
/* The core is now powered up */
bitmap_set(core_power, core, 1);
}
static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned core = cpu_core(&cpu_data[cpu]);
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
unsigned long core_entry;
unsigned int remote;
int err;
/* We don't yet support booting CPUs in other clusters */
if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
return -ENOSYS;
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
vpe_cfg->sp = __KSTK_TOS(idle);
vpe_cfg->gp = (unsigned long)task_thread_info(idle);
atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
preempt_disable();
if (!test_bit(core, core_power)) {
/* Boot a VPE on a powered down core */
boot_core(core, vpe_id);
goto out;
}
if (cpu_has_vp) {
mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
write_gcr_co_reset_base(core_entry);
mips_cm_unlock_other();
}
if (!cpus_are_siblings(cpu, smp_processor_id())) {
/* Boot a VPE on another powered up core */
for (remote = 0; remote < NR_CPUS; remote++) {
if (!cpus_are_siblings(cpu, remote))
continue;
if (cpu_online(remote))
break;
}
if (remote >= NR_CPUS) {
pr_crit("No online CPU in core %u to start CPU%d\n",
core, cpu);
goto out;
}
err = smp_call_function_single(remote, remote_vpe_boot,
NULL, 1);
if (err)
panic("Failed to call remote CPU\n");
goto out;
}
BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);
/* Boot a VPE on this core */
mips_cps_boot_vpes(core_cfg, vpe_id);
out:
preempt_enable();
return 0;
}
static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);
/* Disable MT - we only want to run 1 TC per VPE */
if (cpu_has_mipsmt)
dmt();
if (mips_cm_revision() >= CM_REV_CM3) {
unsigned int ident = read_gic_vl_ident();
/*
* Ensure that our calculation of the VP ID matches up with
* what the GIC reports, otherwise we'll have configured
* interrupts incorrectly.
*/
BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
}
if (core > 0 && !read_gcr_cl_coherence())
pr_warn("Core %u is not in coherent domain\n", core);
if (cpu_has_veic)
clear_c0_status(ST0_IM);
else
change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
STATUSF_IP4 | STATUSF_IP5 |
STATUSF_IP6 | STATUSF_IP7);
}
static void cps_smp_finish(void)
{
write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
enum cpu_death {
CPU_DEATH_HALT,
CPU_DEATH_POWER,
};
static void cps_shutdown_this_cpu(enum cpu_death death)
{
unsigned int cpu, core, vpe_id;
cpu = smp_processor_id();
core = cpu_core(&cpu_data[cpu]);
if (death == CPU_DEATH_HALT) {
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
pr_debug("Halting core %d VP%d\n", core, vpe_id);
if (cpu_has_mipsmt) {
/* Halt this TC */
write_c0_tchalt(TCHALT_H);
instruction_hazard();
} else if (cpu_has_vp) {
write_cpc_cl_vp_stop(1 << vpe_id);
/* Ensure that the VP_STOP register is written */
wmb();
}
} else {
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
}
}
#ifdef CONFIG_KEXEC
static void cps_kexec_nonboot_cpu(void)
{
if (cpu_has_mipsmt || cpu_has_vp)
cps_shutdown_this_cpu(CPU_DEATH_HALT);
else
cps_shutdown_this_cpu(CPU_DEATH_POWER);
}
#endif /* CONFIG_KEXEC */
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
#ifdef CONFIG_HOTPLUG_CPU
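/* Prepare this CPU for offlining; requires a power-gated idle state */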
static int cps_cpu_disable(void)
{
unsigned cpu = smp_processor_id();
struct core_boot_config *core_cfg;
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
calculate_cpu_foreign_map();
irq_migrate_all_off_this_cpu();
return 0;
}
static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;
void play_dead(void)
{
unsigned int cpu;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
cpu_death = CPU_DEATH_POWER;
pr_debug("CPU%d going offline\n", cpu);
if (cpu_has_mipsmt || cpu_has_vp) {
/* Look for another online VPE within the core */
for_each_online_cpu(cpu_death_sibling) {
if (!cpus_are_siblings(cpu, cpu_death_sibling))
continue;
/*
* There is an online VPE within the core. Just halt
* this TC and leave the core alone.
*/
cpu_death = CPU_DEATH_HALT;
break;
}
}
cpuhp_ap_report_dead();
cps_shutdown_this_cpu(cpu_death);
/* This should never be reached */
panic("Failed to offline CPU %u", cpu);
}
static void wait_for_sibling_halt(void *ptr_cpu)
{
unsigned cpu = (unsigned long)ptr_cpu;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned halted;
unsigned long flags;
do {
local_irq_save(flags);
settc(vpe_id);
halted = read_tc_c0_tchalt();
local_irq_restore(flags);
} while (!(halted & TCHALT_H));
}
static void cps_cpu_die(unsigned int cpu) { }
static void cps_cleanup_dead_cpu(unsigned cpu)
{
unsigned core = cpu_core(&cpu_data[cpu]);
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
ktime_t fail_time;
unsigned stat;
int err;
/*
	 * Now wait for the CPU to actually offline. Without doing this the
* offlining may race with one or more of:
*
* - Onlining the CPU again.
* - Powering down the core if another VPE within it is offlined.
* - A sibling VPE entering a non-coherent state.
*
* In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
* with which we could race, so do nothing.
*/
if (cpu_death == CPU_DEATH_POWER) {
/*
* Wait for the core to enter a powered down or clock gated
* state, the latter happening when a JTAG probe is connected
* in which case the CPC will refuse to power down the core.
*/
fail_time = ktime_add_ms(ktime_get(), 2000);
do {
mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
stat &= CPC_Cx_STAT_CONF_SEQSTATE;
stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
mips_cpc_unlock_other();
mips_cm_unlock_other();
if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
break;
/*
* The core ought to have powered down, but didn't &
* now we don't really know what state it's in. It's
* likely that its _pwr_up pin has been wired to logic
* 1 & it powered back up as soon as we powered it
* down...
*
* The best we can do is warn the user & continue in
* the hope that the core is doing nothing harmful &
* might behave properly if we online it later.
*/
if (WARN(ktime_after(ktime_get(), fail_time),
"CPU%u hasn't powered down, seq. state %u\n",
cpu, stat))
break;
} while (1);
/* Indicate the core is powered off */
bitmap_clear(core_power, core, 1);
} else if (cpu_has_mipsmt) {
/*
* Have a CPU with access to the offlined CPUs registers wait
* for its TC to halt.
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
(void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
} else if (cpu_has_vp) {
do {
mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
stat = read_cpc_co_vp_running();
mips_cm_unlock_other();
} while (stat & (1 << vpe_id));
}
}
#endif /* CONFIG_HOTPLUG_CPU */
static const struct plat_smp_ops cps_smp_ops = {
.smp_setup = cps_smp_setup,
.prepare_cpus = cps_prepare_cpus,
.boot_secondary = cps_boot_secondary,
.init_secondary = cps_init_secondary,
.smp_finish = cps_smp_finish,
.send_ipi_single = mips_smp_send_ipi_single,
.send_ipi_mask = mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = cps_cpu_disable,
.cpu_die = cps_cpu_die,
.cleanup_dead_cpu = cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};
bool mips_cps_smp_in_use(void)
{
extern const struct plat_smp_ops *mp_ops;
return mp_ops == &cps_smp_ops;
}
int register_cps_smp_ops(void)
{
if (!mips_cm_present()) {
pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
return -ENODEV;
}
/* check we have a GIC - we need one for IPIs */
if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
return -ENODEV;
}
register_smp_ops(&cps_smp_ops);
return 0;
}
| linux-master | arch/mips/kernel/smp-cps.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Copyright (C) 2011 Cavium Networks, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*/
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
struct cpu_hw_events {
/* Array of events on this cpu. */
struct perf_event *events[MIPS_MAX_HWEVENTS];
/*
* Set the bit (indexed by the counter number) when the counter
* is used for an event.
*/
unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in their performance counters; some use this
	 * differently, and some may not use it at all.
*/
unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
unsigned int event_id;
/*
* MIPS performance counters are indexed starting from 0.
* CNTR_EVEN indicates the indexes of the counters to be used are
* even numbers.
*/
unsigned int cntr_mask;
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#define CNTR_ALL 0xffffffff
enum {
T = 0,
V = 1,
P = 2,
} range;
};
static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);
#define C(x) PERF_COUNT_HW_CACHE_##x
struct mips_pmu {
u64 max_period;
u64 valid_count;
u64 overflow;
const char *name;
int irq;
u64 (*read_counter)(unsigned int idx);
void (*write_counter)(unsigned int idx, u64 val);
const struct mips_perf_event *(*map_raw_event)(u64 config);
const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
const struct mips_perf_event (*cache_event_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
unsigned int num_counters;
};
static int counter_bits;
static struct mips_pmu mipspmu;
#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter) 0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter) (filter)
#endif /* CONFIG_CPU_BMIPS5000 */
#define M_TC_EN_ALL M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define M_TC_EN_VPE M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define M_TC_EN_TC M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
#define M_PERFCTL_COUNT_EVENT_WHENEVER (MIPS_PERFCTRL_EXL | \
MIPS_PERFCTRL_K | \
MIPS_PERFCTRL_U | \
MIPS_PERFCTRL_S | \
MIPS_PERFCTRL_IE)
#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK 0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK 0x1f
#endif
#define CNTR_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static DEFINE_RWLOCK(pmuint_rwlock);
#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
		0 : cpu_vpe_id(&current_cpu_data))
#endif
/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
if (num_possible_cpus() > 1)
return 1;
return 0;
}
static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
return counters >> vpe_shift();
}
#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id() 0
#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);
/* 0: Not Loongson-3
* 1: Loongson-3A1000/3B1000/3B1500
* 2: Loongson-3A2000/3A3000
* 3: Loongson-3A4000+
*/
#define LOONGSON_PMU_TYPE0 0
#define LOONGSON_PMU_TYPE1 1
#define LOONGSON_PMU_TYPE2 2
#define LOONGSON_PMU_TYPE3 3
static inline int get_loongson3_pmu_type(void)
{
if (boot_cpu_type() != CPU_LOONGSON64)
return LOONGSON_PMU_TYPE0;
if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
return LOONGSON_PMU_TYPE1;
if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
return LOONGSON_PMU_TYPE2;
if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
return LOONGSON_PMU_TYPE3;
return LOONGSON_PMU_TYPE0;
}
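/*
 * Counters are shared per-core between two VPEs: VPE 1's logical
 * counters 0/1 map onto physical counters 2/3, so rotate the index.
 */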
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
if (vpe_id() == 1)
idx = (idx + 2) & 3;
return idx;
}
static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
/*
		 * The counters are unsigned; we must cast to truncate
		 * off the high bits.
*/
return (u32)read_c0_perfcntr0();
case 1:
return (u32)read_c0_perfcntr1();
case 2:
return (u32)read_c0_perfcntr2();
case 3:
return (u32)read_c0_perfcntr3();
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
}
static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
u64 mask = CNTR_BIT_MASK(counter_bits);
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
return read_c0_perfcntr0_64() & mask;
case 1:
return read_c0_perfcntr1_64() & mask;
case 2:
return read_c0_perfcntr2_64() & mask;
case 3:
return read_c0_perfcntr3_64() & mask;
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
}
static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
write_c0_perfcntr0(val);
return;
case 1:
write_c0_perfcntr1(val);
return;
case 2:
write_c0_perfcntr2(val);
return;
case 3:
write_c0_perfcntr3(val);
return;
}
}
static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
val &= CNTR_BIT_MASK(counter_bits);
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
write_c0_perfcntr0_64(val);
return;
case 1:
write_c0_perfcntr1_64(val);
return;
case 2:
write_c0_perfcntr2_64(val);
return;
case 3:
write_c0_perfcntr3_64(val);
return;
}
}
static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
return read_c0_perfctrl0();
case 1:
return read_c0_perfctrl1();
case 2:
return read_c0_perfctrl2();
case 3:
return read_c0_perfctrl3();
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
}
static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
idx = mipsxx_pmu_swizzle_perf_idx(idx);
switch (idx) {
case 0:
write_c0_perfctrl0(val);
return;
case 1:
write_c0_perfctrl1(val);
return;
case 2:
write_c0_perfctrl2(val);
return;
case 3:
write_c0_perfctrl3(val);
return;
}
}
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
struct hw_perf_event *hwc)
{
int i;
unsigned long cntr_mask;
/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
*/
if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
cntr_mask = (hwc->event_base >> 10) & 0xffff;
else
cntr_mask = (hwc->event_base >> 8) & 0xffff;
for (i = mipspmu.num_counters - 1; i >= 0; i--) {
/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only use
		 * even _or_ odd counters. This introduces an issue: when an
		 * event of the former kind takes the counter that an event
		 * of the latter kind wants to use, counter allocation for
		 * the latter event fails. Dynamically swapping the counters
		 * would satisfy both, but we leave this issue alone for now.
*/
if (test_bit(i, &cntr_mask) &&
!test_and_set_bit(i, cpuc->used_mask))
return i;
}
return -EAGAIN;
}
static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
struct perf_event *event = container_of(evt, struct perf_event, hw);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned int range = evt->event_base >> 24;
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
(evt->config_base & M_PERFCTL_CONFIG_MASK) |
/* Make sure interrupt enabled. */
MIPS_PERFCTRL_IE;
else
cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
(evt->config_base & M_PERFCTL_CONFIG_MASK) |
/* Make sure interrupt enabled. */
MIPS_PERFCTRL_IE;
if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
/* enable the counter for the calling thread */
cpuc->saved_ctrl[idx] |=
(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
/* The counter is processor wide. Set it up to count all TCs. */
pr_debug("Enabling perf counter for all TCs\n");
cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
} else {
unsigned int cpu, ctrl;
/*
* Set up the counter for a particular CPU when event->cpu is
* a valid CPU number. Otherwise set up the counter for the CPU
* scheduling this thread.
*/
cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
ctrl |= M_TC_EN_VPE;
cpuc->saved_ctrl[idx] |= ctrl;
pr_debug("Enabling perf counter for CPU%d\n", cpu);
}
/*
* We do not actually let the counter run. Leave it until start().
*/
}
static void mipsxx_pmu_disable_event(int idx)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long flags;
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
local_irq_save(flags);
cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
~M_PERFCTL_COUNT_EVENT_WHENEVER;
mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
local_irq_restore(flags);
}
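/*
 * Program the counter so that it overflows after at most "left" more
 * events: the counter is seeded with (overflow - left) and the overflow
 * interrupt signals period expiry.
 */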
static int mipspmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
u64 left = local64_read(&hwc->period_left);
u64 period = hwc->sample_period;
int ret = 0;
if (unlikely((left + period) & (1ULL << 63))) {
/* left underflowed by more than period. */
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
} else if (unlikely((left + period) <= period)) {
/* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > mipspmu.max_period) {
left = mipspmu.max_period;
local64_set(&hwc->period_left, left);
}
local64_set(&hwc->prev_count, mipspmu.overflow - left);
if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
mipsxx_pmu_write_control(idx,
M_PERFCTL_EVENT(hwc->event_base & 0x3ff));
mipspmu.write_counter(idx, mipspmu.overflow - left);
perf_event_update_userpage(event);
return ret;
}
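/*
 * Fold the events counted since prev_count into the perf totals; the
 * cmpxchg loop makes the update safe against a concurrent overflow IRQ.
 */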
static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
u64 prev_raw_count, new_raw_count;
u64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = mipspmu.read_counter(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = new_raw_count - prev_raw_count;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
}
static void mipspmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/* Set the period for the event. */
mipspmu_event_set_period(event, hwc, hwc->idx);
/* Enable the event. */
mipsxx_pmu_enable_event(hwc, hwc->idx);
}
static void mipspmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* We are working on a local event. */
mipsxx_pmu_disable_event(hwc->idx);
barrier();
mipspmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static int mipspmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
perf_pmu_disable(event->pmu);
/* To look for a free counter for this event. */
idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
mipsxx_pmu_disable_event(idx);
cpuc->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
mipspmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
static void mipspmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
mipspmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static void mipspmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Don't read disabled counters! */
if (hwc->idx < 0)
return;
mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
write_unlock(&pmuint_rwlock);
#endif
resume_local_counters();
}
/*
 * MIPS performance counters can be per-TC. The control registers cannot
 * be accessed directly across CPUs, so global control requires cross-CPU
 * calls. on_each_cpu() could help, but we cannot guarantee this function
 * is called with interrupts enabled. So we pause the local counters, then
 * grab a rwlock and leave the counters on other CPUs alone. If a counter
 * interrupt is raised while we own the write lock, that CPU simply pauses
 * its local counters and spins in the handler. Also, we know we won't be
 * switched to another CPU after pausing local counters and before
 * grabbing the lock.
*/
static void mipspmu_disable(struct pmu *pmu)
{
pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
write_lock(&pmuint_rwlock);
#endif
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);
static int mipspmu_get_irq(void)
{
int err;
if (mipspmu.irq >= 0) {
/* Request my own irq handler. */
err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
IRQF_PERCPU | IRQF_NOBALANCING |
IRQF_NO_THREAD | IRQF_NO_SUSPEND |
IRQF_SHARED,
"mips_perf_pmu", &mipspmu);
if (err) {
pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
mipspmu.irq);
}
} else if (cp0_perfcount_irq < 0) {
/*
* We are sharing the irq number with the timer interrupt.
*/
save_perf_irq = perf_irq;
perf_irq = mipsxx_pmu_handle_shared_irq;
err = 0;
} else {
pr_warn("The platform hasn't properly defined its interrupt controller\n");
err = -ENOENT;
}
return err;
}
static void mipspmu_free_irq(void)
{
if (mipspmu.irq >= 0)
free_irq(mipspmu.irq, &mipspmu);
else if (cp0_perfcount_irq < 0)
perf_irq = save_perf_irq;
}
/*
 * mipsxx/rm9000/loongson2 have different performance counters; each has
 * its own specific low-level init routines.
*/
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events,
&pmu_reserve_mutex)) {
/*
* We must not call the destroy function with interrupts
* disabled.
*/
on_each_cpu(reset_counters,
(void *)(long)mipspmu.num_counters, 1);
mipspmu_free_irq();
mutex_unlock(&pmu_reserve_mutex);
}
}
static int mipspmu_event_init(struct perf_event *event)
{
int err = 0;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}
if (err)
return err;
return __hw_perf_event_init(event);
}
static struct pmu pmu = {
.pmu_enable = mipspmu_enable,
.pmu_disable = mipspmu_disable,
.event_init = mipspmu_event_init,
.add = mipspmu_add,
.del = mipspmu_del,
.start = mipspmu_start,
.stop = mipspmu_stop,
.read = mipspmu_read,
};
static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
* Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
* event_id.
*/
#ifdef CONFIG_MIPS_MT_SMP
if (num_possible_cpus() > 1)
return ((unsigned int)pev->range << 24) |
(pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
else
#endif /* CONFIG_MIPS_MT_SMP */
{
if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
return (pev->cntr_mask & 0xfffc00) |
(pev->event_id & 0x3ff);
else
return (pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
}
}
static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
return ERR_PTR(-EOPNOTSUPP);
return &(*mipspmu.general_event_map)[idx];
}
static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct mips_perf_event *pev;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pev = &((*mipspmu.cache_event_map)
[cache_type]
[cache_op]
[cache_result]);
if (pev->cntr_mask == 0)
return ERR_PTR(-EOPNOTSUPP);
return pev;
}
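/*
 * Dry-run counter allocation for the whole group on a scratch
 * cpu_hw_events to verify all group members can be scheduled at once.
 */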
static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cpu_hw_events fake_cpuc;
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
return -EINVAL;
for_each_sibling_event(sibling, leader) {
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
return -EINVAL;
}
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
return -EINVAL;
return 0;
}
/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
int idx, struct perf_sample_data *data,
struct pt_regs *regs)
{
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc = &event->hw;
mipspmu_event_update(event, hwc, idx);
data->period = event->hw.last_period;
if (!mipspmu_event_set_period(event, hwc, idx))
return;
if (perf_event_overflow(event, data, regs))
mipsxx_pmu_disable_event(idx);
}
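/* Count the implemented counters by walking the "more counters" (M) bits */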
static int __n_counters(void)
{
if (!cpu_has_perf)
return 0;
if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
return 1;
if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
return 2;
if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
return 3;
return 4;
}
static int n_counters(void)
{
int counters;
switch (current_cpu_type()) {
case CPU_R10000:
counters = 2;
break;
case CPU_R12000:
case CPU_R14000:
case CPU_R16000:
counters = 4;
break;
default:
counters = __n_counters();
}
return counters;
}
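/*
 * On Loongson-3A2000/3A3000 (TYPE2) a plain control write is apparently
 * not enough: reset each counter under several event selections (the
 * event field sits at bits 9:5, hence the <<5 shifts) before disabling.
 */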
static void loongson3_reset_counters(void *arg)
{
int counters = (int)(long)arg;
switch (counters) {
case 4:
mipsxx_pmu_write_control(3, 0);
mipspmu.write_counter(3, 0);
mipsxx_pmu_write_control(3, 127<<5);
mipspmu.write_counter(3, 0);
mipsxx_pmu_write_control(3, 191<<5);
mipspmu.write_counter(3, 0);
mipsxx_pmu_write_control(3, 255<<5);
mipspmu.write_counter(3, 0);
mipsxx_pmu_write_control(3, 319<<5);
mipspmu.write_counter(3, 0);
mipsxx_pmu_write_control(3, 383<<5);
mipspmu.write_counter(3, 0);
mipsxx_pmu_write_control(3, 575<<5);
mipspmu.write_counter(3, 0);
fallthrough;
case 3:
mipsxx_pmu_write_control(2, 0);
mipspmu.write_counter(2, 0);
mipsxx_pmu_write_control(2, 127<<5);
mipspmu.write_counter(2, 0);
mipsxx_pmu_write_control(2, 191<<5);
mipspmu.write_counter(2, 0);
mipsxx_pmu_write_control(2, 255<<5);
mipspmu.write_counter(2, 0);
mipsxx_pmu_write_control(2, 319<<5);
mipspmu.write_counter(2, 0);
mipsxx_pmu_write_control(2, 383<<5);
mipspmu.write_counter(2, 0);
mipsxx_pmu_write_control(2, 575<<5);
mipspmu.write_counter(2, 0);
fallthrough;
case 2:
mipsxx_pmu_write_control(1, 0);
mipspmu.write_counter(1, 0);
mipsxx_pmu_write_control(1, 127<<5);
mipspmu.write_counter(1, 0);
mipsxx_pmu_write_control(1, 191<<5);
mipspmu.write_counter(1, 0);
mipsxx_pmu_write_control(1, 255<<5);
mipspmu.write_counter(1, 0);
mipsxx_pmu_write_control(1, 319<<5);
mipspmu.write_counter(1, 0);
mipsxx_pmu_write_control(1, 383<<5);
mipspmu.write_counter(1, 0);
mipsxx_pmu_write_control(1, 575<<5);
mipspmu.write_counter(1, 0);
fallthrough;
case 1:
mipsxx_pmu_write_control(0, 0);
mipspmu.write_counter(0, 0);
mipsxx_pmu_write_control(0, 127<<5);
mipspmu.write_counter(0, 0);
mipsxx_pmu_write_control(0, 191<<5);
mipspmu.write_counter(0, 0);
mipsxx_pmu_write_control(0, 255<<5);
mipspmu.write_counter(0, 0);
mipsxx_pmu_write_control(0, 319<<5);
mipspmu.write_counter(0, 0);
mipsxx_pmu_write_control(0, 383<<5);
mipspmu.write_counter(0, 0);
mipsxx_pmu_write_control(0, 575<<5);
mipspmu.write_counter(0, 0);
break;
}
}
static void reset_counters(void *arg)
{
int counters = (int)(long)arg;
if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
loongson3_reset_counters(arg);
return;
}
switch (counters) {
case 4:
mipsxx_pmu_write_control(3, 0);
mipspmu.write_counter(3, 0);
fallthrough;
case 3:
mipsxx_pmu_write_control(2, 0);
mipspmu.write_counter(2, 0);
fallthrough;
case 2:
mipsxx_pmu_write_control(1, 0);
mipspmu.write_counter(1, 0);
fallthrough;
case 1:
mipsxx_pmu_write_control(0, 0);
mipspmu.write_counter(0, 0);
break;
}
}
/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};
/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};
static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
/* These only count dcache, not icache */
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD },
};
static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
};
static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
};
static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
};
static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};
static const struct mips_perf_event bmips5000_event_map
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};
/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
/*
* Note that MIPS has only "hit" events countable for
* the prefetch operation.
*/
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
},
},
};
/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
[C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
/*
* Note that MIPS has only "hit" events countable for
* the prefetch operation.
*/
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
},
},
/*
* 74K core does not have specific DTLB events. proAptiv core has
* "speculative" DTLB events which are numbered 0x63 (even/odd) and
* not included here. One can use raw events if really needed.
*/
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
},
},
};
static const struct mips_perf_event i6x00_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD },
[C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD },
[C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD },
[C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD },
},
},
[C(DTLB)] = {
/* Can't distinguish read & write */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
[C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
[C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
},
},
[C(BPU)] = {
/* Conditional branches / mispredicted */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD },
[C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD },
},
},
};
static const struct mips_perf_event loongson3_cache_map1
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
[C(RESULT_MISS)] = { 0x01, CNTR_ODD },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
[C(RESULT_MISS)] = { 0x01, CNTR_ODD },
},
},
};
static const struct mips_perf_event loongson3_cache_map2
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x156, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x155, CNTR_ALL },
[C(RESULT_MISS)] = { 0x153, CNTR_ALL },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x18, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x18, CNTR_ALL },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x1b6, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x1b7, CNTR_ALL },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0x1bf, CNTR_ALL },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x92, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x92, CNTR_ALL },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x94, CNTR_ALL },
[C(RESULT_MISS)] = { 0x9c, CNTR_ALL },
},
},
};
static const struct mips_perf_event loongson3_cache_map3
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x1e, CNTR_ALL },
[C(RESULT_MISS)] = { 0x1f, CNTR_ALL },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0xaa, CNTR_ALL },
[C(RESULT_MISS)] = { 0xa9, CNTR_ALL },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ALL },
[C(RESULT_MISS)] = { 0x1d, CNTR_ALL },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x2e, CNTR_ALL },
[C(RESULT_MISS)] = { 0x2f, CNTR_ALL },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x14, CNTR_ALL },
[C(RESULT_MISS)] = { 0x1b, CNTR_ALL },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x02, CNTR_ALL },
[C(RESULT_MISS)] = { 0x08, CNTR_ALL },
},
},
};
/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 12, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 12, CNTR_ODD, T },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 10, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
[C(RESULT_MISS)] = { 10, CNTR_ODD, T },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
/*
* Note that MIPS has only "hit" events countable for
* the prefetch operation.
*/
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
[C(RESULT_MISS)] = { 28, CNTR_ODD, P },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
[C(RESULT_MISS)] = { 28, CNTR_ODD, P },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
},
},
};
static const struct mips_perf_event octeon_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
[C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
},
},
[C(DTLB)] = {
/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
*/
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x37, CNTR_ALL },
},
},
};
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
const struct mips_perf_event *pev;
int err;
/* Returning MIPS event descriptor for generic perf event. */
if (PERF_TYPE_HARDWARE == event->attr.type) {
if (event->attr.config >= PERF_COUNT_HW_MAX)
return -EINVAL;
pev = mipspmu_map_general_event(event->attr.config);
} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
pev = mipspmu_map_cache_event(event->attr.config);
} else if (PERF_TYPE_RAW == event->attr.type) {
/* We are working on the global raw event. */
mutex_lock(&raw_event_mutex);
pev = mipspmu.map_raw_event(event->attr.config);
} else {
/* The event type is not (yet) supported. */
return -EOPNOTSUPP;
}
if (IS_ERR(pev)) {
if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
return PTR_ERR(pev);
}
/*
	 * We allow maximum flexibility in how each individual counter shared
	 * by a single CPU operates (the mode exclusion and the range).
*/
hwc->config_base = MIPS_PERFCTRL_IE;
hwc->event_base = mipspmu_perf_event_encode(pev);
if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
if (!attr->exclude_user)
hwc->config_base |= MIPS_PERFCTRL_U;
if (!attr->exclude_kernel) {
hwc->config_base |= MIPS_PERFCTRL_K;
/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
hwc->config_base |= MIPS_PERFCTRL_EXL;
}
if (!attr->exclude_hv)
hwc->config_base |= MIPS_PERFCTRL_S;
hwc->config_base &= M_PERFCTL_CONFIG_MASK;
/*
* The event can belong to another cpu. We do not assign a local
* counter for it for now.
*/
hwc->idx = -1;
hwc->config = 0;
if (!hwc->sample_period) {
hwc->sample_period = mipspmu.max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
err = 0;
if (event->group_leader != event)
err = validate_group(event);
event->destroy = hw_perf_event_destroy;
if (err)
event->destroy(event);
return err;
}
static void pause_local_counters(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int ctr = mipspmu.num_counters;
unsigned long flags;
local_irq_save(flags);
do {
ctr--;
cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
~M_PERFCTL_COUNT_EVENT_WHENEVER);
} while (ctr > 0);
local_irq_restore(flags);
}
static void resume_local_counters(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int ctr = mipspmu.num_counters;
do {
ctr--;
mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
} while (ctr > 0);
}
static int mipsxx_pmu_handle_shared_irq(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_sample_data data;
unsigned int counters = mipspmu.num_counters;
u64 counter;
int n, handled = IRQ_NONE;
struct pt_regs *regs;
if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
return handled;
/*
* First we pause the local counters, so that when we are locked
* here, the counters are all paused. When it gets locked due to
* perf_disable(), the timer interrupt handler will be delayed.
*
* See also mipsxx_pmu_start().
*/
pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
read_lock(&pmuint_rwlock);
#endif
regs = get_irq_regs();
perf_sample_data_init(&data, 0, 0);
for (n = counters - 1; n >= 0; n--) {
if (!test_bit(n, cpuc->used_mask))
continue;
counter = mipspmu.read_counter(n);
if (!(counter & mipspmu.overflow))
continue;
handle_associated_event(cpuc, n, &data, regs);
handled = IRQ_HANDLED;
}
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
read_unlock(&pmuint_rwlock);
#endif
resume_local_counters();
/*
* Do all the work for the pending perf events. We can do this
* in here because the performance counter interrupt is a regular
* interrupt, not NMI.
*/
if (handled == IRQ_HANDLED)
irq_work_run();
return handled;
}
static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
return mipsxx_pmu_handle_shared_irq();
}
/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b) \
((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
(b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
(r) == 176 || ((b) >= 50 && (b) <= 55) || \
((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
#endif
/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b) \
((b) == 0 || (b) == 1)
/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
((b) == 0 || (b) == 1)
/* P5600 */
#define IS_BOTH_COUNTERS_P5600_EVENT(b) \
((b) == 0 || (b) == 1)
/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b) \
((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
(b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
(r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
(r) == 188 || (b) == 61 || (b) == 62 || \
((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
#endif
/* interAptiv */
#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
(b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
(r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
(b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
#endif
/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
((b) == 0 || (b) == 1)
/*
 * For most cores the user can use raw events 0-255, where 0-127 are for the
 * events of even counters and 128-255 are for odd counters; bit 7 acts as the
 * even/odd bank selector. So, for example, to request event number 15 (per
 * the user manual) on an odd counter, 128 must be added to 15, giving 143
 * (0x8F) as the event config.
*
* Some newer cores have even more events, in which case the user can use raw
* events 0-511, where 0-255 are for the events of even counters, and 256-511
* are for odd counters, so bit 8 is used to indicate the even/odd bank selector.
*/
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
/* currently most cores have 7-bit event numbers */
int pmu_type;
unsigned int raw_id = config & 0xff;
unsigned int base_id = raw_id & 0x7f;
switch (current_cpu_type()) {
case CPU_24K:
if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
/*
* This is actually doing nothing. Non-multithreading
* CPUs will not check and calculate the range.
*/
raw_event.range = P;
#endif
break;
case CPU_34K:
if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
raw_event.range = P;
else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
raw_event.range = V;
else
raw_event.range = T;
#endif
break;
case CPU_74K:
case CPU_1074K:
if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
raw_event.range = P;
#endif
break;
case CPU_PROAPTIV:
if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
raw_event.range = P;
#endif
break;
case CPU_P5600:
case CPU_P6600:
/* 8-bit event numbers */
raw_id = config & 0x1ff;
base_id = raw_id & 0xff;
if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
raw_event.range = P;
#endif
break;
case CPU_I6400:
case CPU_I6500:
/* 8-bit event numbers */
base_id = config & 0xff;
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
break;
case CPU_1004K:
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
raw_event.range = P;
else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
raw_event.range = V;
else
raw_event.range = T;
#endif
break;
case CPU_INTERAPTIV:
if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
raw_event.range = P;
else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
raw_event.range = V;
else
raw_event.range = T;
#endif
break;
case CPU_BMIPS5000:
if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
break;
case CPU_LOONGSON64:
pmu_type = get_loongson3_pmu_type();
switch (pmu_type) {
case LOONGSON_PMU_TYPE1:
raw_event.cntr_mask =
raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
break;
case LOONGSON_PMU_TYPE2:
base_id = config & 0x3ff;
raw_event.cntr_mask = CNTR_ALL;
if ((base_id >= 1 && base_id < 28) ||
(base_id >= 64 && base_id < 90) ||
(base_id >= 128 && base_id < 164) ||
(base_id >= 192 && base_id < 200) ||
(base_id >= 256 && base_id < 275) ||
(base_id >= 320 && base_id < 361) ||
(base_id >= 384 && base_id < 574))
break;
return ERR_PTR(-EOPNOTSUPP);
case LOONGSON_PMU_TYPE3:
base_id = raw_id;
raw_event.cntr_mask = CNTR_ALL;
break;
}
break;
}
raw_event.event_id = base_id;
return &raw_event;
}
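/*
 * Minimal sketch (not part of the upstream file) of how a raw config
 * for the 7-bit scheme handled above can be composed; "event" is a
 * manual event number and bit 7 selects the odd counter bank. The
 * helper name is hypothetical.
 */
static inline u64 example_mipsxx_raw_config(unsigned int event, bool odd)
{
	return (event & 0x7f) | (odd ? 0x80 : 0);
}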
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
unsigned int base_id = config & 0x7f;
unsigned int event_max;
raw_event.cntr_mask = CNTR_ALL;
raw_event.event_id = base_id;
if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
event_max = 0x5f;
else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
event_max = 0x42;
else
event_max = 0x3a;
if (base_id > event_max) {
return ERR_PTR(-EOPNOTSUPP);
}
switch (base_id) {
case 0x00:
case 0x0f:
case 0x1e:
case 0x1f:
case 0x2f:
case 0x34:
case 0x3e ... 0x3f:
return ERR_PTR(-EOPNOTSUPP);
default:
break;
}
return &raw_event;
}
static int __init
init_hw_perf_events(void)
{
int counters, irq, pmu_type;
pr_info("Performance counters: ");
counters = n_counters();
if (counters == 0) {
pr_cont("No available PMU.\n");
return -ENODEV;
}
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
if (!cpu_has_mipsmt_pertccounters)
counters = counters_total_to_per_cpu(counters);
#endif
if (get_c0_perfcount_int)
irq = get_c0_perfcount_int();
else if (cp0_perfcount_irq >= 0)
irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
else
irq = -1;
mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
switch (current_cpu_type()) {
case CPU_24K:
mipspmu.name = "mips/24K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_34K:
mipspmu.name = "mips/34K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_74K:
mipspmu.name = "mips/74K";
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
case CPU_PROAPTIV:
mipspmu.name = "mips/proAptiv";
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
case CPU_P5600:
mipspmu.name = "mips/P5600";
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
case CPU_P6600:
mipspmu.name = "mips/P6600";
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
case CPU_I6400:
mipspmu.name = "mips/I6400";
mipspmu.general_event_map = &i6x00_event_map;
mipspmu.cache_event_map = &i6x00_cache_map;
break;
case CPU_I6500:
mipspmu.name = "mips/I6500";
mipspmu.general_event_map = &i6x00_event_map;
mipspmu.cache_event_map = &i6x00_cache_map;
break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_1074K:
mipspmu.name = "mips/1074K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_INTERAPTIV:
mipspmu.name = "mips/interAptiv";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_LOONGSON32:
mipspmu.name = "mips/loongson1";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
case CPU_LOONGSON64:
mipspmu.name = "mips/loongson3";
pmu_type = get_loongson3_pmu_type();
switch (pmu_type) {
case LOONGSON_PMU_TYPE1:
counters = 2;
mipspmu.general_event_map = &loongson3_event_map1;
mipspmu.cache_event_map = &loongson3_cache_map1;
break;
case LOONGSON_PMU_TYPE2:
counters = 4;
mipspmu.general_event_map = &loongson3_event_map2;
mipspmu.cache_event_map = &loongson3_cache_map2;
break;
case LOONGSON_PMU_TYPE3:
counters = 4;
mipspmu.general_event_map = &loongson3_event_map3;
mipspmu.cache_event_map = &loongson3_cache_map3;
break;
}
break;
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
mipspmu.name = "octeon";
mipspmu.general_event_map = &octeon_event_map;
mipspmu.cache_event_map = &octeon_cache_map;
mipspmu.map_raw_event = octeon_pmu_map_raw_event;
break;
case CPU_BMIPS5000:
mipspmu.name = "BMIPS5000";
mipspmu.general_event_map = &bmips5000_event_map;
mipspmu.cache_event_map = &bmips5000_cache_map;
break;
default:
pr_cont("Either hardware does not support performance "
"counters, or not yet implemented.\n");
return -ENODEV;
}
mipspmu.num_counters = counters;
mipspmu.irq = irq;
if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
counter_bits = 48;
mipspmu.max_period = (1ULL << 47) - 1;
mipspmu.valid_count = (1ULL << 47) - 1;
mipspmu.overflow = 1ULL << 47;
} else {
counter_bits = 64;
mipspmu.max_period = (1ULL << 63) - 1;
mipspmu.valid_count = (1ULL << 63) - 1;
mipspmu.overflow = 1ULL << 63;
}
mipspmu.read_counter = mipsxx_pmu_read_counter_64;
mipspmu.write_counter = mipsxx_pmu_write_counter_64;
} else {
counter_bits = 32;
mipspmu.max_period = (1ULL << 31) - 1;
mipspmu.valid_count = (1ULL << 31) - 1;
mipspmu.overflow = 1ULL << 31;
mipspmu.read_counter = mipsxx_pmu_read_counter;
mipspmu.write_counter = mipsxx_pmu_write_counter;
}
on_each_cpu(reset_counters, (void *)(long)counters, 1);
pr_cont("%s PMU enabled, %d %d-bit counters available to each "
"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
irq < 0 ? " (share with timer interrupt)" : "");
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
early_initcall(init_hw_perf_events);
| linux-master | arch/mips/kernel/perf_event_mipsxx.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Support for Kernel relocation at boot time
*
* Copyright (C) 2015, Imagination Technologies Ltd.
* Authors: Matt Redfearn ([email protected])
*/
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/fw/fw.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/panic_notifier.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/printk.h>
#define RELOCATED(x) ((void *)((long)x + offset))
extern u32 _relocation_start[]; /* End kernel image / start relocation table */
extern u32 _relocation_end[]; /* End relocation table */
extern long __start___ex_table; /* Start exception table */
extern long __stop___ex_table; /* End exception table */
extern void __weak plat_fdt_relocated(void *new_location);
/*
 * This function may be overridden by a platform to perform any
 * post-relocation fixup necessary.
 * Return non-zero to abort relocation.
*/
int __weak plat_post_relocation(long offset)
{
return 0;
}
static inline u32 __init get_synci_step(void)
{
u32 res;
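	/* Hardware register 1 (rdhwr $1) is SYNCI_Step, the SYNCI line stride. */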
__asm__("rdhwr %0, $1" : "=r" (res));
return res;
}
static void __init sync_icache(void *kbase, unsigned long kernel_length)
{
void *kend = kbase + kernel_length;
u32 step = get_synci_step();
do {
__asm__ __volatile__(
"synci 0(%0)"
: /* no output */
: "r" (kbase));
kbase += step;
} while (step && kbase < kend);
/* Completion barrier */
__sync();
}
static void __init apply_r_mips_64_rel(u32 *loc_new, long offset)
{
*(u64 *)loc_new += offset;
}
static void __init apply_r_mips_32_rel(u32 *loc_new, long offset)
{
*loc_new += offset;
}
static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
{
unsigned long target_addr = (*loc_orig) & 0x03ffffff;
if (offset % 4) {
pr_err("Dangerous R_MIPS_26 REL relocation\n");
return -ENOEXEC;
}
/* Original target address */
target_addr <<= 2;
target_addr += (unsigned long)loc_orig & 0xf0000000;
/* Get the new target address */
target_addr += offset;
if ((target_addr & 0xf0000000) != ((unsigned long)loc_new & 0xf0000000)) {
pr_err("R_MIPS_26 REL relocation overflow\n");
return -ENOEXEC;
}
target_addr -= (unsigned long)loc_new & 0xf0000000;
target_addr >>= 2;
*loc_new = (*loc_new & ~0x03ffffff) | (target_addr & 0x03ffffff);
return 0;
}
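/*
 * Worked example for the fixup above (added commentary): with loc_orig
 * at 0x80100000 and an instruction index of 0x040000, the original
 * target is (0x040000 << 2) + (0x80100000 & 0xf0000000) = 0x80100000.
 * After a 16MB relocation offset the new target is 0x81100000, which
 * stays in the same 256MB segment, so the index written back is
 * (0x81100000 & ~0xf0000000) >> 2 = 0x440000.
 */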
static void __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new,
long offset)
{
unsigned long insn = *loc_orig;
unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */
target += offset;
*loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
}
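/*
 * Added commentary: unlike the module loader, no matching LO16 carry
 * fixup is needed here. The relocation offset is 64KB aligned (see
 * determine_relocation_address() and relocation_addr_valid()), so the
 * low 16 bits of every HI16/LO16 pair are unchanged and the high half
 * can simply absorb the offset without a carry.
 */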
static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new,
long offset)
{
switch (type) {
case R_MIPS_64:
apply_r_mips_64_rel(loc_new, offset);
break;
case R_MIPS_32:
apply_r_mips_32_rel(loc_new, offset);
break;
case R_MIPS_26:
return apply_r_mips_26_rel(loc_orig, loc_new, offset);
case R_MIPS_HI16:
apply_r_mips_hi16_rel(loc_orig, loc_new, offset);
break;
default:
pr_err("Unhandled relocation type %d at 0x%pK\n", type,
loc_orig);
return -ENOEXEC;
}
return 0;
}
static int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
{
u32 *r;
u32 *loc_orig;
u32 *loc_new;
int type;
int res;
for (r = _relocation_start; r < _relocation_end; r++) {
/* Sentinel for last relocation */
if (*r == 0)
break;
type = (*r >> 24) & 0xff;
loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
loc_new = RELOCATED(loc_orig);
res = reloc_handler(type, loc_orig, loc_new, offset);
if (res)
return res;
}
return 0;
}
/*
* The exception table is filled in by the relocs tool after vmlinux is linked.
* It must be relocated separately since there will not be any relocation
* information for it filled in by the linker.
*/
static int __init relocate_exception_table(long offset)
{
unsigned long *etable_start, *etable_end, *e;
etable_start = RELOCATED(&__start___ex_table);
etable_end = RELOCATED(&__stop___ex_table);
for (e = etable_start; e < etable_end; e++)
*e += offset;
return 0;
}
#ifdef CONFIG_RANDOMIZE_BASE
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
size_t diff, i;
diff = (void *)ptr - area;
if (unlikely(size < diff + sizeof(hash)))
return hash;
size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */
hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
hash ^= ptr[i];
}
return hash;
}
static inline __init unsigned long get_random_boot(void)
{
unsigned long entropy = random_get_entropy();
unsigned long hash = 0;
/* Attempt to create a simple but unpredictable starting entropy. */
hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
/* Add in any runtime entropy we can get */
hash = rotate_xor(hash, &entropy, sizeof(entropy));
#if defined(CONFIG_USE_OF)
/* Get any additional entropy passed in device tree */
if (initial_boot_params) {
int node, len;
u64 *prop;
node = fdt_path_offset(initial_boot_params, "/chosen");
if (node >= 0) {
prop = fdt_getprop_w(initial_boot_params, node,
"kaslr-seed", &len);
if (prop && (len == sizeof(u64)))
hash = rotate_xor(hash, prop, sizeof(*prop));
}
}
#endif /* CONFIG_USE_OF */
return hash;
}
static inline __init bool kaslr_disabled(void)
{
char *str;
#if defined(CONFIG_CMDLINE_BOOL)
const char *builtin_cmdline = CONFIG_CMDLINE;
str = strstr(builtin_cmdline, "nokaslr");
if (str == builtin_cmdline ||
(str > builtin_cmdline && *(str - 1) == ' '))
return true;
#endif
str = strstr(arcs_cmdline, "nokaslr");
if (str == arcs_cmdline || (str > arcs_cmdline && *(str - 1) == ' '))
return true;
return false;
}
static inline void __init *determine_relocation_address(void)
{
/* Choose a new address for the kernel */
unsigned long kernel_length;
void *dest = &_text;
unsigned long offset;
if (kaslr_disabled())
return dest;
kernel_length = (long)_end - (long)(&_text);
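	/*
	 * The << 16 below keeps the candidate offset 64KB aligned, as
	 * required by relocation_addr_valid().
	 */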
offset = get_random_boot() << 16;
offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
if (offset < kernel_length)
offset += ALIGN(kernel_length, 0xffff);
return RELOCATED(dest);
}
#else
static inline void __init *determine_relocation_address(void)
{
/*
* Choose a new address for the kernel
* For now we'll hard code the destination
*/
return (void *)0xffffffff81000000;
}
#endif
static inline int __init relocation_addr_valid(void *loc_new)
{
if ((unsigned long)loc_new & 0x0000ffff) {
/* Inappropriately aligned new location */
return 0;
}
if ((unsigned long)loc_new < (unsigned long)&_end) {
/* New location overlaps original kernel */
return 0;
}
return 1;
}
static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
{
unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
*new_addr = (unsigned long)offset;
}
#if defined(CONFIG_USE_OF)
void __weak *plat_get_fdt(void)
{
return NULL;
}
#endif
void *__init relocate_kernel(void)
{
void *loc_new;
unsigned long kernel_length;
unsigned long bss_length;
long offset = 0;
int res = 1;
/* Default to original kernel entry point */
void *kernel_entry = start_kernel;
void *fdt = NULL;
/* Get the command line */
fw_init_cmdline();
#if defined(CONFIG_USE_OF)
/* Deal with the device tree */
fdt = plat_get_fdt();
early_init_dt_scan(fdt);
if (boot_command_line[0]) {
/* Boot command line was passed in device tree */
strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
}
#endif /* CONFIG_USE_OF */
kernel_length = (long)(&_relocation_start) - (long)(&_text);
bss_length = (long)&__bss_stop - (long)&__bss_start;
loc_new = determine_relocation_address();
/* Sanity check relocation address */
if (relocation_addr_valid(loc_new))
offset = (unsigned long)loc_new - (unsigned long)(&_text);
/* Reset the command line now so we don't end up with a duplicate */
arcs_cmdline[0] = '\0';
if (offset) {
void (*fdt_relocated_)(void *) = NULL;
#if defined(CONFIG_USE_OF)
unsigned long fdt_phys = virt_to_phys(fdt);
/*
		 * If a built-in DTB is used then it will have been relocated
		 * during kernel _text relocation. If an appended DTB is used
		 * then it will not be relocated, but it should remain
		 * intact in the original location. If the DTB is loaded by
		 * the bootloader then it may need to be moved if it overlaps
		 * the target memory area.
*/
if (fdt_phys >= virt_to_phys(RELOCATED(&_text)) &&
fdt_phys <= virt_to_phys(RELOCATED(&_end))) {
void *fdt_relocated =
RELOCATED(ALIGN((long)&_end, PAGE_SIZE));
memcpy(fdt_relocated, fdt, fdt_totalsize(fdt));
fdt = fdt_relocated;
fdt_relocated_ = RELOCATED(&plat_fdt_relocated);
}
#endif /* CONFIG_USE_OF */
		/* Copy the kernel to its new location */
memcpy(loc_new, &_text, kernel_length);
/* Perform relocations on the new kernel */
res = do_relocations(&_text, loc_new, offset);
if (res < 0)
goto out;
/* Sync the caches ready for execution of new kernel */
sync_icache(loc_new, kernel_length);
res = relocate_exception_table(offset);
if (res < 0)
goto out;
/*
		 * The original .bss has already been cleared, and
		 * some variables such as command line parameters have
		 * been stored to it, so make a copy in the new location.
*/
memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
/*
* If fdt was stored outside of the kernel image and
* had to be moved then update platform's state data
* with the new fdt location
*/
if (fdt_relocated_)
fdt_relocated_(fdt);
/*
* Last chance for the platform to abort relocation.
* This may also be used by the platform to perform any
* initialisation required now that the new kernel is
* resident in memory and ready to be executed.
*/
if (plat_post_relocation(offset))
goto out;
/* The current thread is now within the relocated image */
__current_thread_info = RELOCATED(&init_thread_union);
/* Return the new kernel's entry point */
kernel_entry = RELOCATED(start_kernel);
/* Error may occur before, so keep it at last */
update_kaslr_offset(&__kaslr_offset, offset);
}
out:
return kernel_entry;
}
/*
* Show relocation information on panic.
*/
static void show_kernel_relocation(const char *level)
{
if (__kaslr_offset > 0) {
printk(level);
pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset);
pr_cont(" .text @ 0x%pK\n", _text);
pr_cont(" .data @ 0x%pK\n", _sdata);
pr_cont(" .bss @ 0x%pK\n", __bss_start);
}
}
static int kernel_location_notifier_fn(struct notifier_block *self,
unsigned long v, void *p)
{
show_kernel_relocation(KERN_EMERG);
return NOTIFY_DONE;
}
static struct notifier_block kernel_location_notifier = {
.notifier_call = kernel_location_notifier_fn
};
static int __init register_kernel_offset_dumper(void)
{
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_location_notifier);
return 0;
}
__initcall(register_kernel_offset_dumper);
| linux-master | arch/mips/kernel/relocate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2003, 2004 Ralf Baechle ([email protected])
* Copyright (C) 2005 Thiemo Seufer
*/
#undef DEBUG
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/jump_label.h>
extern void jump_label_apply_nops(struct module *mod);
struct mips_hi16 {
struct mips_hi16 *next;
Elf_Addr *addr;
Elf_Addr value;
};
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
#ifdef MODULE_START
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v)
{
*location = base + v;
}
static int apply_r_mips_26(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 relocation\n",
me->name);
return -ENOEXEC;
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
pr_err("module %s: relocation overflow\n",
me->name);
return -ENOEXEC;
}
*location = (*location & ~0x03ffffff) |
((base + (v >> 2)) & 0x03ffffff);
return 0;
}
static int apply_r_mips_hi16(struct module *me, u32 *location, Elf_Addr v,
bool rela)
{
struct mips_hi16 *n;
if (rela) {
*location = (*location & 0xffff0000) |
((((long long) v + 0x8000LL) >> 16) & 0xffff);
return 0;
}
/*
* We cannot relocate this one now because we don't know the value of
* the carry we need to add. Save the information, and let LO16 do the
* actual relocation.
*/
n = kmalloc(sizeof *n, GFP_KERNEL);
if (!n)
return -ENOMEM;
n->addr = (Elf_Addr *)location;
n->value = v;
n->next = me->arch.r_mips_hi16_list;
me->arch.r_mips_hi16_list = n;
return 0;
}
static void free_relocation_chain(struct mips_hi16 *l)
{
struct mips_hi16 *next;
while (l) {
next = l->next;
kfree(l);
l = next;
}
}
static int apply_r_mips_lo16(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
{
unsigned long insnlo = base;
struct mips_hi16 *l;
Elf_Addr val, vallo;
if (rela) {
*location = (*location & 0xffff0000) | (v & 0xffff);
return 0;
}
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (me->arch.r_mips_hi16_list != NULL) {
l = me->arch.r_mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
/*
* The value for the HI16 had best be the same.
*/
if (v != l->value)
goto out_danger;
/*
* Do the HI16 relocation. Note that we actually don't
* need to know anything about the LO16 itself, except
* where to find the low 16 bits of the addend needed
* by the LO16.
*/
insn = *l->addr;
val = ((insn & 0xffff) << 16) + vallo;
val += v;
/*
* Account for the sign extension that will happen in
* the low bits.
*/
val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
insn = (insn & ~0xffff) | val;
*l->addr = insn;
next = l->next;
kfree(l);
l = next;
}
me->arch.r_mips_hi16_list = NULL;
}
/*
* Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
*location = insnlo;
return 0;
out_danger:
free_relocation_chain(l);
me->arch.r_mips_hi16_list = NULL;
pr_err("module %s: dangerous R_MIPS_LO16 relocation\n", me->name);
return -ENOEXEC;
}
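/*
 * Worked example of the carry logic above (added commentary): take a
 * REL pair with hi addend 0x0001 and lo addend 0x8004, i.e. a combined
 * addend of (0x0001 << 16) + sext(0x8004) = 0x8004, relocated by
 * v = 0x1000. The final value is 0x9004. Because the new low half
 * 0x9004 sign-extends negative at run time, the hi field is recomputed
 * as (0x9004 >> 16) + 1 = 0x0001 and the lo field becomes 0x9004, so
 * (0x0001 << 16) + sext(0x9004) = 0x9004 as required.
 */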
static int apply_r_mips_pc(struct module *me, u32 *location, u32 base,
Elf_Addr v, unsigned int bits)
{
unsigned long mask = GENMASK(bits - 1, 0);
unsigned long se_bits;
long offset;
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_PC%u relocation\n",
me->name, bits);
return -ENOEXEC;
}
/* retrieve & sign extend implicit addend if any */
offset = base & mask;
offset |= (offset & BIT(bits - 1)) ? ~mask : 0;
offset += ((long)v - (long)location) >> 2;
/* check the sign bit onwards are identical - ie. we didn't overflow */
se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
if ((offset & ~mask) != (se_bits & ~mask)) {
pr_err("module %s: relocation overflow\n", me->name);
return -ENOEXEC;
}
*location = (*location & ~mask) | (offset & mask);
return 0;
}
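/*
 * Worked example of the overflow test above (added commentary): for
 * R_MIPS_PC16, mask = 0xffff and the field is a signed 16-bit word
 * offset. A computed offset of 0x9000 has bit 15 set but bits 16+ all
 * clear; sign replication would require bits 16+ to be all ones, so
 * (offset & ~mask) != (se_bits & ~mask) and -ENOEXEC is returned. An
 * in-range offset such as -0x100 replicates its sign bit through the
 * high bits and passes.
 */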
static int apply_r_mips_pc16(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 16);
}
static int apply_r_mips_pc21(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 21);
}
static int apply_r_mips_pc26(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 26);
}
static int apply_r_mips_64(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
*(Elf_Addr *)location = v;
return 0;
}
static int apply_r_mips_higher(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
*location = (*location & 0xffff0000) |
((((long long)v + 0x80008000LL) >> 32) & 0xffff);
return 0;
}
static int apply_r_mips_highest(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
*location = (*location & 0xffff0000) |
((((long long)v + 0x800080008000LL) >> 48) & 0xffff);
return 0;
}
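/*
 * Note on the rounding constants above (added commentary): when the
 * 64-bit value is reassembled at run time, each lower 16-bit field is
 * sign-extended, so every slice below the one being extracted can
 * subtract up to 0x8000 from the slices above it. Adding 0x80008000
 * (for HIGHER, bits 47..32) or 0x800080008000 (for HIGHEST, bits
 * 63..48) pre-compensates for those potential borrows, exactly as the
 * 0x8000 bias does for plain HI16 in the RELA case.
 */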
/**
* reloc_handler() - Apply a particular relocation to a module
* @type: type of the relocation to apply
* @me: the module to apply the reloc to
* @location: the address at which the reloc is to be applied
* @base: the existing value at location for REL-style; 0 for RELA-style
* @v: the value of the reloc, with addend for RELA-style
 * @rela:	indication of whether this is a RELA (true) or REL (false) relocation
*
* Each implemented relocation function applies a particular type of
* relocation to the module @me. Relocs that may be found in either REL or RELA
* variants can be handled by making use of the @base & @v parameters which are
* set to values which abstract the difference away from the particular reloc
* implementations.
*
* Return: 0 upon success, else -ERRNO
*/
static int reloc_handler(u32 type, struct module *me, u32 *location, u32 base,
Elf_Addr v, bool rela)
{
switch (type) {
case R_MIPS_NONE:
break;
case R_MIPS_32:
apply_r_mips_32(location, base, v);
break;
case R_MIPS_26:
return apply_r_mips_26(me, location, base, v);
case R_MIPS_HI16:
return apply_r_mips_hi16(me, location, v, rela);
case R_MIPS_LO16:
return apply_r_mips_lo16(me, location, base, v, rela);
case R_MIPS_PC16:
return apply_r_mips_pc16(me, location, base, v);
case R_MIPS_PC21_S2:
return apply_r_mips_pc21(me, location, base, v);
case R_MIPS_PC26_S2:
return apply_r_mips_pc26(me, location, base, v);
case R_MIPS_64:
return apply_r_mips_64(location, v, rela);
case R_MIPS_HIGHER:
return apply_r_mips_higher(location, v, rela);
case R_MIPS_HIGHEST:
return apply_r_mips_highest(location, v, rela);
default:
pr_err("%s: Unknown relocation type %u\n", me->name, type);
return -EINVAL;
}
return 0;
}
static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me, bool rela)
{
union {
Elf_Mips_Rel *rel;
Elf_Mips_Rela *rela;
} r;
Elf_Sym *sym;
u32 *location, base;
unsigned int i, type;
Elf_Addr v;
int err = 0;
size_t reloc_sz;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
r.rel = (void *)sechdrs[relsec].sh_addr;
reloc_sz = rela ? sizeof(*r.rela) : sizeof(*r.rel);
me->arch.r_mips_hi16_list = NULL;
for (i = 0; i < sechdrs[relsec].sh_size / reloc_sz; i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ r.rel->r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(*r.rel);
if (sym->st_value >= -MAX_ERRNO) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
pr_warn("%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
err = -ENOENT;
goto out;
}
type = ELF_MIPS_R_TYPE(*r.rel);
if (rela) {
v = sym->st_value + r.rela->r_addend;
base = 0;
r.rela = &r.rela[1];
} else {
v = sym->st_value;
base = *location;
r.rel = &r.rel[1];
}
err = reloc_handler(type, me, location, base, v, rela);
if (err)
goto out;
}
out:
/*
* Normally the hi16 list should be deallocated at this point. A
* malformed binary however could contain a series of R_MIPS_HI16
* relocations not followed by a R_MIPS_LO16 relocation, or if we hit
* an error processing a reloc we might have gotten here before
* reaching the R_MIPS_LO16. In either case, free up the list and
* return an error.
*/
if (me->arch.r_mips_hi16_list) {
free_relocation_chain(me->arch.r_mips_hi16_list);
me->arch.r_mips_hi16_list = NULL;
err = err ?: -ENOEXEC;
}
return err;
}
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
return __apply_relocate(sechdrs, strtab, symindex, relsec, me, false);
}
#ifdef CONFIG_MODULES_USE_ELF_RELA
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
return __apply_relocate(sechdrs, strtab, symindex, relsec, me, true);
}
#endif /* CONFIG_MODULES_USE_ELF_RELA */
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
unsigned long flags;
const struct exception_table_entry *e = NULL;
struct mod_arch_specific *dbe;
spin_lock_irqsave(&dbe_lock, flags);
list_for_each_entry(dbe, &dbe_list, dbe_list) {
e = search_extable(dbe->dbe_start,
dbe->dbe_end - dbe->dbe_start, addr);
if (e)
break;
}
spin_unlock_irqrestore(&dbe_lock, flags);
	/* If we found one, we are currently running inside it, hence
	   we cannot unload the module and no refcount is needed. */
return e;
}
/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
if (IS_ENABLED(CONFIG_JUMP_LABEL))
jump_label_apply_nops(me);
INIT_LIST_HEAD(&me->arch.dbe_list);
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
continue;
me->arch.dbe_start = (void *)s->sh_addr;
me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
spin_lock_irq(&dbe_lock);
list_add(&me->arch.dbe_list, &dbe_list);
spin_unlock_irq(&dbe_lock);
}
return 0;
}
void module_arch_cleanup(struct module *mod)
{
spin_lock_irq(&dbe_lock);
list_del(&mod->arch.dbe_list);
spin_unlock_irq(&dbe_lock);
}
| linux-master | arch/mips/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2003 Broadcom Corporation
*/
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compat.h>
#include <linux/bitops.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/compat-signal.h>
#include <asm/sim.h>
#include <linux/uaccess.h>
#include <asm/ucontext.h>
#include <asm/fpu.h>
#include <asm/cpu-features.h>
#include "signal-common.h"
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
#define __NR_N32_restart_syscall 6214
extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
struct ucontextn32 {
u32 uc_flags;
s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct rt_sigframe_n32 {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
struct compat_siginfo rs_info;
struct ucontextn32 rs_uc;
};
asmlinkage void sysn32_rt_sigreturn(void)
{
struct rt_sigframe_n32 __user *frame;
struct pt_regs *regs;
sigset_t set;
int sig;
regs = current_pt_regs();
frame = (struct rt_sigframe_n32 __user *)regs->regs[29];
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig);
if (compat_restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
: /* no outputs */
: "r" (regs));
/* Unreached */
badframe:
force_sig(SIGSEGV);
}
static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe_n32 __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof (*frame)))
return -EFAULT;
/* Create siginfo. */
err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
struct mips_abi mips_abi_n32 = {
.setup_rt_frame = setup_rt_frame_n32,
.restart = __NR_N32_restart_syscall,
.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
.vdso = &vdso_image_n32,
};
| linux-master | arch/mips/kernel/signal_n32.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 David Daney
*/
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/watch.h>
/*
* Install the watch registers for the current thread. A maximum of
* four registers are installed although the machine may have more.
*/
void mips_install_watch_registers(struct task_struct *t)
{
struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
unsigned int watchhi = MIPS_WATCHHI_G | /* Trap all ASIDs */
MIPS_WATCHHI_IRW; /* Clear result bits */
switch (current_cpu_data.watch_reg_use_cnt) {
default:
BUG();
case 4:
write_c0_watchlo3(watches->watchlo[3]);
write_c0_watchhi3(watchhi | watches->watchhi[3]);
fallthrough;
case 3:
write_c0_watchlo2(watches->watchlo[2]);
write_c0_watchhi2(watchhi | watches->watchhi[2]);
fallthrough;
case 2:
write_c0_watchlo1(watches->watchlo[1]);
write_c0_watchhi1(watchhi | watches->watchhi[1]);
fallthrough;
case 1:
write_c0_watchlo0(watches->watchlo[0]);
write_c0_watchhi0(watchhi | watches->watchhi[0]);
}
}
/*
* Read back the watchhi registers so the user space debugger has
* access to the I, R, and W bits. A maximum of four registers are
* read although the machine may have more.
*/
void mips_read_watch_registers(void)
{
struct mips3264_watch_reg_state *watches =
		&current->thread.watch.mips3264;
unsigned int watchhi_mask = MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW;
switch (current_cpu_data.watch_reg_use_cnt) {
default:
BUG();
case 4:
watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask);
fallthrough;
case 3:
watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask);
fallthrough;
case 2:
watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask);
fallthrough;
case 1:
watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask);
}
if (current_cpu_data.watch_reg_use_cnt == 1 &&
(watches->watchhi[0] & MIPS_WATCHHI_IRW) == 0) {
/* Pathological case of release 1 architecture that
* doesn't set the condition bits. We assume that
* since we got here, the watch condition was met and
* signal that the conditions requested in watchlo
* were met. */
watches->watchhi[0] |= (watches->watchlo[0] & MIPS_WATCHHI_IRW);
}
}
/*
* Disable all watch registers. Although only four registers are
* installed, all are cleared to eliminate the possibility of endless
* looping in the watch handler.
*/
void mips_clear_watch_registers(void)
{
switch (current_cpu_data.watch_reg_count) {
default:
BUG();
case 8:
write_c0_watchlo7(0);
fallthrough;
case 7:
write_c0_watchlo6(0);
fallthrough;
case 6:
write_c0_watchlo5(0);
fallthrough;
case 5:
write_c0_watchlo4(0);
fallthrough;
case 4:
write_c0_watchlo3(0);
fallthrough;
case 3:
write_c0_watchlo2(0);
fallthrough;
case 2:
write_c0_watchlo1(0);
fallthrough;
case 1:
write_c0_watchlo0(0);
}
}
void mips_probe_watch_registers(struct cpuinfo_mips *c)
{
unsigned int t;
if ((c->options & MIPS_CPU_WATCH) == 0)
return;
/*
	 * Check which of the I, R, and W bits are supported, then
* disable the register.
*/
write_c0_watchlo0(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo0();
write_c0_watchlo0(0);
c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW;
/* Write the mask bits and read them back to determine which
* can be used. */
c->watch_reg_count = 1;
c->watch_reg_use_cnt = 1;
t = read_c0_watchhi0();
write_c0_watchhi0(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi0();
c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK);
if ((t & MIPS_WATCHHI_M) == 0)
return;
write_c0_watchlo1(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo1();
write_c0_watchlo1(0);
c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW;
c->watch_reg_count = 2;
c->watch_reg_use_cnt = 2;
t = read_c0_watchhi1();
write_c0_watchhi1(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi1();
c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK);
if ((t & MIPS_WATCHHI_M) == 0)
return;
write_c0_watchlo2(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo2();
write_c0_watchlo2(0);
c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW;
c->watch_reg_count = 3;
c->watch_reg_use_cnt = 3;
t = read_c0_watchhi2();
write_c0_watchhi2(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi2();
c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK);
if ((t & MIPS_WATCHHI_M) == 0)
return;
write_c0_watchlo3(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo3();
write_c0_watchlo3(0);
c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW;
c->watch_reg_count = 4;
c->watch_reg_use_cnt = 4;
t = read_c0_watchhi3();
write_c0_watchhi3(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi3();
c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK);
if ((t & MIPS_WATCHHI_M) == 0)
return;
/* We use at most 4, but probe and report up to 8. */
c->watch_reg_count = 5;
t = read_c0_watchhi4();
if ((t & MIPS_WATCHHI_M) == 0)
return;
c->watch_reg_count = 6;
t = read_c0_watchhi5();
if ((t & MIPS_WATCHHI_M) == 0)
return;
c->watch_reg_count = 7;
t = read_c0_watchhi6();
if ((t & MIPS_WATCHHI_M) == 0)
return;
c->watch_reg_count = 8;
}
| linux-master | arch/mips/kernel/watch.c |
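mips_probe_watch_registers() above discovers the implemented WATCHLO/WATCHHI bits by writing every candidate bit and keeping whatever reads back. The same write-then-read-back idiom, reduced to a self-contained sketch with a simulated register (CANDIDATE_BITS and HW_IMPLEMENTED are invented for the demo):

#include <stdio.h>

#define CANDIDATE_BITS	0x00000fffu	/* bits we would like to use */
#define HW_IMPLEMENTED	0x00000f0fu	/* pretend the hardware wires only these */

static unsigned int reg;		/* stand-in for a c0 watch register */

static void reg_write(unsigned int v)
{
	reg = v & HW_IMPLEMENTED;	/* unimplemented bits read as zero */
}

static unsigned int reg_read(void)
{
	return reg;
}

int main(void)
{
	unsigned int mask;

	reg_write(CANDIDATE_BITS);		/* try to set everything */
	mask = reg_read() & CANDIDATE_BITS;	/* keep what stuck */
	reg_write(0);				/* leave the register disabled */
	printf("usable bits: 0x%08x\n", mask);	/* 0x00000f0f here */
	return 0;
}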
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>
static int major;
/* The number of TCs and VPEs physically available on the core */
static int hw_tcs, hw_vpes;
/* We are prepared, so configure and start the VPE... */
int vpe_run(struct vpe *v)
{
unsigned long flags, val, dmt_flag;
struct vpe_notifications *notifier;
unsigned int vpeflags;
struct tc *t;
/* check we are the Master VPE */
local_irq_save(flags);
val = read_c0_vpeconf0();
if (!(val & VPECONF0_MVP)) {
pr_warn("VPE loader: only Master VPE's are able to config MT\n");
local_irq_restore(flags);
return -1;
}
dmt_flag = dmt();
vpeflags = dvpe();
if (list_empty(&v->tc)) {
evpe(vpeflags);
emt(dmt_flag);
local_irq_restore(flags);
pr_warn("VPE loader: No TC's associated with VPE %d\n",
v->minor);
return -ENOEXEC;
}
t = list_first_entry(&v->tc, struct tc, tc);
	/* Put MVPEs into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(t->index);
/* should check it is halted, and not activated */
if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
!(read_tc_c0_tchalt() & TCHALT_H)) {
evpe(vpeflags);
emt(dmt_flag);
local_irq_restore(flags);
pr_warn("VPE loader: TC %d is already active!\n",
t->index);
return -ENOEXEC;
}
/*
* Write the address we want it to start running from in the TCPC
* register.
*/
write_tc_c0_tcrestart((unsigned long)v->__start);
write_tc_c0_tccontext((unsigned long)0);
/*
* Mark the TC as activated, not interrupt exempt and not dynamically
* allocatable
*/
val = read_tc_c0_tcstatus();
val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
write_tc_c0_tcstatus(val);
write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
/*
* We don't pass the memsize here, so VPE programs need to be
* compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined.
*/
mttgpr(7, 0);
mttgpr(6, v->ntcs);
/* set up VPE1 */
/*
* bind the TC to VPE 1 as late as possible so we only have the final
* VPE registers to set up, and so an EJTAG probe can trigger on it
*/
write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
back_to_back_c0_hazard();
/* Set up the XTC bit in vpeconf0 to point at our tc */
write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
| (t->index << VPECONF0_XTC_SHIFT));
back_to_back_c0_hazard();
/* enable this VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
/* clear out any left overs from a previous program */
write_vpe_c0_status(0);
write_vpe_c0_cause(0);
/* take system out of configuration state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/*
* SMVP kernels manage VPE enable independently, but uniprocessor
* kernels need to turn it on, even if that wasn't the pre-dvpe() state.
*/
#ifdef CONFIG_SMP
evpe(vpeflags);
#else
evpe(EVPE_ENABLE);
#endif
emt(dmt_flag);
local_irq_restore(flags);
list_for_each_entry(notifier, &v->notify, list)
notifier->start(VPE_MODULE_MINOR);
return 0;
}
void cleanup_tc(struct tc *tc)
{
unsigned long flags;
unsigned int mtflags, vpflags;
int tmp;
local_irq_save(flags);
mtflags = dmt();
vpflags = dvpe();
	/* Put MVPEs into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(tc->index);
tmp = read_tc_c0_tcstatus();
/* mark not allocated and not dynamically allocatable */
tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
tmp |= TCSTATUS_IXMT; /* interrupt exempt */
write_tc_c0_tcstatus(tmp);
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(vpflags);
emt(mtflags);
local_irq_restore(flags);
}
/* module wrapper entry points */
/* give me a vpe */
void *vpe_alloc(void)
{
int i;
struct vpe *v;
/* find a vpe */
for (i = 1; i < MAX_VPES; i++) {
v = get_vpe(i);
if (v != NULL) {
v->state = VPE_STATE_INUSE;
return v;
}
}
return NULL;
}
EXPORT_SYMBOL(vpe_alloc);
/* start running from here */
int vpe_start(void *vpe, unsigned long start)
{
struct vpe *v = vpe;
v->__start = start;
return vpe_run(v);
}
EXPORT_SYMBOL(vpe_start);
/* halt it for now */
int vpe_stop(void *vpe)
{
struct vpe *v = vpe;
struct tc *t;
unsigned int evpe_flags;
evpe_flags = dvpe();
t = list_entry(v->tc.next, struct tc, tc);
if (t != NULL) {
settc(t->index);
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
}
evpe(evpe_flags);
return 0;
}
EXPORT_SYMBOL(vpe_stop);
/* I've done with it thank you */
int vpe_free(void *vpe)
{
struct vpe *v = vpe;
struct tc *t;
unsigned int evpe_flags;
t = list_entry(v->tc.next, struct tc, tc);
if (t == NULL)
return -ENOEXEC;
evpe_flags = dvpe();
	/* Put MVPEs into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(t->index);
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
/* halt the TC */
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
/* mark the TC unallocated */
write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
v->state = VPE_STATE_UNUSED;
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(evpe_flags);
return 0;
}
EXPORT_SYMBOL(vpe_free);
static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct vpe *vpe = get_vpe(aprp_cpu_index());
struct vpe_notifications *notifier;
list_for_each_entry(notifier, &vpe->notify, list)
notifier->stop(aprp_cpu_index());
release_progmem(vpe->load_addr);
cleanup_tc(get_tc(aprp_cpu_index()));
vpe_stop(vpe);
vpe_free(vpe);
return len;
}
static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
char *buf)
{
struct vpe *vpe = get_vpe(aprp_cpu_index());
return sprintf(buf, "%d\n", vpe->ntcs);
}
static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct vpe *vpe = get_vpe(aprp_cpu_index());
unsigned long new;
int ret;
ret = kstrtoul(buf, 0, &new);
if (ret < 0)
return ret;
if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
return -EINVAL;
vpe->ntcs = new;
return len;
}
static DEVICE_ATTR_RW(ntcs);
static struct attribute *vpe_attrs[] = {
&dev_attr_kill.attr,
&dev_attr_ntcs.attr,
NULL,
};
ATTRIBUTE_GROUPS(vpe);
static void vpe_device_release(struct device *cd)
{
}
static struct class vpe_class = {
.name = "vpe",
.dev_release = vpe_device_release,
.dev_groups = vpe_groups,
};
static struct device vpe_device;
int __init vpe_module_init(void)
{
unsigned int mtflags, vpflags;
unsigned long flags, val;
struct vpe *v = NULL;
struct tc *t;
int tc, err;
if (!cpu_has_mipsmt) {
pr_warn("VPE loader: not a MIPS MT capable processor\n");
return -ENODEV;
}
if (vpelimit == 0) {
pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
"Pass maxvpes=<n> argument as kernel argument\n");
return -ENODEV;
}
if (aprp_cpu_index() == 0) {
pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n"
"Pass maxtcs=<n> argument as kernel argument\n");
return -ENODEV;
}
major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
if (major < 0) {
pr_warn("VPE loader: unable to register character device\n");
return major;
}
err = class_register(&vpe_class);
if (err) {
pr_err("vpe_class registration failed\n");
goto out_chrdev;
}
device_initialize(&vpe_device);
vpe_device.class = &vpe_class;
vpe_device.parent = NULL;
dev_set_name(&vpe_device, "vpe1");
vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
err = device_add(&vpe_device);
if (err) {
pr_err("Adding vpe_device failed\n");
goto out_class;
}
local_irq_save(flags);
mtflags = dmt();
vpflags = dvpe();
	/* Put MVPEs into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
val = read_c0_mvpconf0();
hw_tcs = (val & MVPCONF0_PTC) + 1;
hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
/*
		 * Must re-enable multithreading temporarily; otherwise we
		 * might hang if we reschedule, send IPIs or similar.
*/
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(vpflags);
emt(mtflags);
local_irq_restore(flags);
t = alloc_tc(tc);
if (!t) {
err = -ENOMEM;
goto out_dev;
}
local_irq_save(flags);
mtflags = dmt();
vpflags = dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
		/* VPEs */
if (tc < hw_tcs) {
settc(tc);
v = alloc_vpe(tc);
if (v == NULL) {
pr_warn("VPE: unable to allocate VPE\n");
goto out_reenable;
}
v->ntcs = hw_tcs - aprp_cpu_index();
			/* add the TC to this VPE's list of TCs. */
list_add(&t->tc, &v->tc);
/* deactivate all but vpe0 */
if (tc >= aprp_cpu_index()) {
unsigned long tmp = read_vpe_c0_vpeconf0();
tmp &= ~VPECONF0_VPA;
/* master VPE */
tmp |= VPECONF0_MVP;
write_vpe_c0_vpeconf0(tmp);
}
			/* disable multi-threading with TCs */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
~VPECONTROL_TE);
if (tc >= vpelimit) {
/*
* Set config to be the same as vpe0,
* particularly kseg0 coherency alg
*/
write_vpe_c0_config(read_c0_config());
}
}
		/* TCs */
t->pvpe = v; /* set the parent vpe */
if (tc >= aprp_cpu_index()) {
unsigned long tmp;
settc(tc);
/*
			 * A TC that is bound to any other VPE gets bound to
			 * VPE0. Ideally I'd like to make it homeless, but the
			 * hardware doesn't appear to let me bind a TC to a
			 * non-existent VPE, which is perfectly reasonable.
*
* The (un)bound state is visible to an EJTAG probe so
* may notify GDB...
*/
tmp = read_tc_c0_tcbind();
if (tmp & TCBIND_CURVPE) {
				/* TC is bound to a VPE other than VPE0 */
write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
t->pvpe = get_vpe(0); /* set the parent vpe */
}
/* halt the TC */
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
tmp = read_tc_c0_tcstatus();
/* mark not activated and not dynamically allocatable */
tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
tmp |= TCSTATUS_IXMT; /* interrupt exempt */
write_tc_c0_tcstatus(tmp);
}
}
out_reenable:
/* release config state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(vpflags);
emt(mtflags);
local_irq_restore(flags);
return 0;
out_dev:
device_del(&vpe_device);
out_class:
put_device(&vpe_device);
class_unregister(&vpe_class);
out_chrdev:
unregister_chrdev(major, VPE_MODULE_NAME);
return err;
}
void __exit vpe_module_exit(void)
{
struct vpe *v, *n;
device_unregister(&vpe_device);
class_unregister(&vpe_class);
unregister_chrdev(major, VPE_MODULE_NAME);
/* No locking needed here */
list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
if (v->state != VPE_STATE_UNUSED)
release_vpe(v);
}
}
| linux-master | arch/mips/kernel/vpe-mt.c |
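The ntcs attribute registered through DEVICE_ATTR_RW(ntcs) above surfaces in sysfs; given the class name "vpe" and device name "vpe1" set up in vpe_module_init(), the path below is a plausible but assumed location. A hedged userspace sketch that reads and then lowers the TC count:

#include <stdio.h>

int main(void)
{
	/* path assumed from class "vpe" + device name "vpe1" above */
	const char *path = "/sys/class/vpe/vpe1/ntcs";
	FILE *f = fopen(path, "r+");
	int ntcs;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &ntcs) == 1)
		printf("current ntcs: %d\n", ntcs);
	rewind(f);
	fprintf(f, "1\n");		/* request a single TC */
	fclose(f);
	return 0;
}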
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Processor capabilities determination functions.
*
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
* Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/elf.h>
#include <asm/traps.h>
#include "fpu-probe.h"
/* Hardware capabilities */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
void __init check_bugs32(void)
{
}
/*
 * Probe whether the CPU has a config register by toggling the
 * alternate cache bit and seeing whether it matters.
* It's used by cpu_probe to distinguish between R3000A and R3081.
*/
static inline int cpu_has_confreg(void)
{
#ifdef CONFIG_CPU_R3000
extern unsigned long r3k_cache_size(unsigned long);
unsigned long size1, size2;
unsigned long cfg = read_c0_conf();
size1 = r3k_cache_size(ST0_ISC);
write_c0_conf(cfg ^ R30XX_CONF_AC);
size2 = r3k_cache_size(ST0_ISC);
write_c0_conf(cfg);
return size1 != size2;
#else
return 0;
#endif
}
static inline void set_elf_platform(int cpu, const char *plat)
{
if (cpu == 0)
__elf_platform = plat;
}
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
const char *__elf_base_platform;
void cpu_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
/*
	 * Set a default ELF platform; the CPU probe may later
	 * overwrite it with a more precise value
*/
set_elf_platform(cpu, "mips");
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
c->writecombine = _CACHE_UNCACHED;
c->fpu_csr31 = FPU_CSR_RN;
c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008 |
FPU_CSR_CONDX | FPU_CSR_FS;
c->srsets = 1;
c->processor_id = read_c0_prid();
switch (c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) {
case PRID_COMP_LEGACY | PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_COMP_LEGACY | PRID_IMP_R3000:
if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
if (cpu_has_confreg()) {
c->cputype = CPU_R3081E;
__cpu_name[cpu] = "R3081";
} else {
c->cputype = CPU_R3000A;
__cpu_name[cpu] = "R3000A";
}
} else {
c->cputype = CPU_R3000;
__cpu_name[cpu] = "R3000";
}
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
}
BUG_ON(!__cpu_name[cpu]);
BUG_ON(c->cputype == CPU_UNKNOWN);
/*
* Platform code can force the cpu type to optimize code
	 * generation. In that case be sure the cpu type is correctly
	 * set up manually, otherwise it could trigger some nasty bugs.
*/
BUG_ON(current_cpu_type() != c->cputype);
if (mips_fpu_disabled)
c->options &= ~MIPS_CPU_FPU;
if (c->options & MIPS_CPU_FPU)
cpu_set_fpu_opts(c);
else
cpu_set_nofpu_opts(c);
reserve_exception_space(0, 0x400);
}
void cpu_report(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
pr_info("CPU%d revision is: %08x (%s)\n",
smp_processor_id(), c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
pr_info("FPU revision is: %08x\n", c->fpu_id);
}
| linux-master | arch/mips/kernel/cpu-r3k-probe.c |
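cpu_has_confreg() above is probing by side effect: toggle the alternate-cache bit, re-measure the cache, and infer the register's presence from whether the measurement moved. A toy model of the same trick (all values invented; flip HAS_CONF to 0 to simulate the older part):

#include <stdio.h>

#define CONF_AC		0x1	/* stand-in for R30XX_CONF_AC */
#define HAS_CONF	1	/* set to 0 to simulate a part without conf */

static unsigned int conf;	/* simulated conf register */

static unsigned long cache_size(void)
{
	/* on a part with a conf register, AC selects another cache */
	return (HAS_CONF && (conf & CONF_AC)) ? 8192 : 4096;
}

int main(void)
{
	unsigned long size1, size2;

	size1 = cache_size();
	conf ^= CONF_AC;		/* toggle the alternate-cache bit */
	size2 = cache_size();
	conf ^= CONF_AC;		/* restore */
	printf("conf register %s\n", size1 != size2 ? "present" : "absent");
	return 0;
}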
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Processor capabilities determination functions.
*
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
* Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include "fpu-probe.h"
/*
* Get the FPU Implementation/Revision.
*/
static inline unsigned long cpu_get_fpu_id(void)
{
unsigned long tmp, fpu_id;
tmp = read_c0_status();
__enable_fpu(FPU_AS_IS);
fpu_id = read_32bit_cp1_register(CP1_REVISION);
write_c0_status(tmp);
return fpu_id;
}
/*
* Check if the CPU has an external FPU.
*/
int __cpu_has_fpu(void)
{
return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
}
/*
* Determine the FCSR mask for FPU hardware.
*/
static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
{
unsigned long sr, mask, fcsr, fcsr0, fcsr1;
fcsr = c->fpu_csr31;
mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
sr = read_c0_status();
__enable_fpu(FPU_AS_IS);
fcsr0 = fcsr & mask;
write_32bit_cp1_register(CP1_STATUS, fcsr0);
fcsr0 = read_32bit_cp1_register(CP1_STATUS);
fcsr1 = fcsr | ~mask;
write_32bit_cp1_register(CP1_STATUS, fcsr1);
fcsr1 = read_32bit_cp1_register(CP1_STATUS);
write_32bit_cp1_register(CP1_STATUS, fcsr);
write_c0_status(sr);
c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask;
}
/*
* Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes
* supported by FPU hardware.
*/
static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
{
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
unsigned long sr, fir, fcsr, fcsr0, fcsr1;
sr = read_c0_status();
__enable_fpu(FPU_AS_IS);
fir = read_32bit_cp1_register(CP1_REVISION);
if (fir & MIPS_FPIR_HAS2008) {
fcsr = read_32bit_cp1_register(CP1_STATUS);
/*
			 * A MAC2008 toolchain never landed in the real
			 * world, so we only test whether it can be
			 * disabled and don't try to enable it.
*/
fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 |
FPU_CSR_MAC2008);
write_32bit_cp1_register(CP1_STATUS, fcsr0);
fcsr0 = read_32bit_cp1_register(CP1_STATUS);
fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
write_32bit_cp1_register(CP1_STATUS, fcsr1);
fcsr1 = read_32bit_cp1_register(CP1_STATUS);
write_32bit_cp1_register(CP1_STATUS, fcsr);
if (c->isa_level & (MIPS_CPU_ISA_M32R2 |
MIPS_CPU_ISA_M64R2)) {
/*
* The bit for MAC2008 might be reused by R6
* in future, so we only test for R2-R5.
*/
if (fcsr0 & FPU_CSR_MAC2008)
c->options |= MIPS_CPU_MAC_2008_ONLY;
}
if (!(fcsr0 & FPU_CSR_NAN2008))
c->options |= MIPS_CPU_NAN_LEGACY;
if (fcsr1 & FPU_CSR_NAN2008)
c->options |= MIPS_CPU_NAN_2008;
if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008)
c->fpu_msk31 &= ~FPU_CSR_ABS2008;
else
c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008;
if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008)
c->fpu_msk31 &= ~FPU_CSR_NAN2008;
else
c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008;
} else {
c->options |= MIPS_CPU_NAN_LEGACY;
}
write_c0_status(sr);
} else {
c->options |= MIPS_CPU_NAN_LEGACY;
}
}
/*
* IEEE 754 conformance mode to use. Affects the NaN encoding and the
* ABS.fmt/NEG.fmt execution mode.
*/
static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT;
/*
* Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes
 * to be supported by the FPU emulator according to the IEEE 754 conformance
* mode selected. Note that "relaxed" straps the emulator so that it
* allows 2008-NaN binaries even for legacy processors.
*/
static void cpu_set_nofpu_2008(struct cpuinfo_mips *c)
{
c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY);
c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
switch (ieee754) {
case STRICT:
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
} else {
c->options |= MIPS_CPU_NAN_LEGACY;
c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
}
break;
case LEGACY:
c->options |= MIPS_CPU_NAN_LEGACY;
c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
break;
case STD2008:
c->options |= MIPS_CPU_NAN_2008;
c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
break;
case RELAXED:
c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
break;
}
}
/*
* Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
* according to the "ieee754=" parameter.
*/
static void cpu_set_nan_2008(struct cpuinfo_mips *c)
{
switch (ieee754) {
case STRICT:
mips_use_nan_legacy = !!cpu_has_nan_legacy;
mips_use_nan_2008 = !!cpu_has_nan_2008;
break;
case LEGACY:
mips_use_nan_legacy = !!cpu_has_nan_legacy;
mips_use_nan_2008 = !cpu_has_nan_legacy;
break;
case STD2008:
mips_use_nan_legacy = !cpu_has_nan_2008;
mips_use_nan_2008 = !!cpu_has_nan_2008;
break;
case RELAXED:
mips_use_nan_legacy = true;
mips_use_nan_2008 = true;
break;
}
}
/*
* IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override
* settings:
*
* strict: accept binaries that request a NaN encoding supported by the FPU
* legacy: only accept legacy-NaN binaries
* 2008: only accept 2008-NaN binaries
* relaxed: accept any binaries regardless of whether supported by the FPU
*/
static int __init ieee754_setup(char *s)
{
if (!s)
return -1;
else if (!strcmp(s, "strict"))
ieee754 = STRICT;
else if (!strcmp(s, "legacy"))
ieee754 = LEGACY;
else if (!strcmp(s, "2008"))
ieee754 = STD2008;
else if (!strcmp(s, "relaxed"))
ieee754 = RELAXED;
else
return -1;
if (!(boot_cpu_data.options & MIPS_CPU_FPU))
cpu_set_nofpu_2008(&boot_cpu_data);
cpu_set_nan_2008(&boot_cpu_data);
return 0;
}
early_param("ieee754", ieee754_setup);
/*
* Set the FIR feature flags for the FPU emulator.
*/
static void cpu_set_nofpu_id(struct cpuinfo_mips *c)
{
u32 value;
value = 0;
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
value |= MIPS_FPIR_D | MIPS_FPIR_S;
if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W;
if (c->options & MIPS_CPU_NAN_2008)
value |= MIPS_FPIR_HAS2008;
c->fpu_id = value;
}
/* Determined FPU emulator mask to use for the boot CPU with "nofpu". */
static unsigned int mips_nofpu_msk31;
/*
* Set options for FPU hardware.
*/
void cpu_set_fpu_opts(struct cpuinfo_mips *c)
{
c->fpu_id = cpu_get_fpu_id();
mips_nofpu_msk31 = c->fpu_msk31;
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
if (c->fpu_id & MIPS_FPIR_UFRP)
c->options |= MIPS_CPU_UFR;
if (c->fpu_id & MIPS_FPIR_FREP)
c->options |= MIPS_CPU_FRE;
}
cpu_set_fpu_fcsr_mask(c);
cpu_set_fpu_2008(c);
cpu_set_nan_2008(c);
}
/*
* Set options for the FPU emulator.
*/
void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
{
c->options &= ~MIPS_CPU_FPU;
c->fpu_msk31 = mips_nofpu_msk31;
cpu_set_nofpu_2008(c);
cpu_set_nan_2008(c);
cpu_set_nofpu_id(c);
}
int mips_fpu_disabled;
static int __init fpu_disable(char *s)
{
cpu_set_nofpu_opts(&boot_cpu_data);
mips_fpu_disabled = 1;
return 1;
}
__setup("nofpu", fpu_disable);
| linux-master | arch/mips/kernel/fpu-probe.c |
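cpu_set_fpu_fcsr_mask() above finds the hard-wired FCSR bits by writing the probed field as all zeroes, then as all ones, and XOR-ing the two read-backs; a bit that never changes is read-only, which is what feeds fpu_msk31. A self-contained model of the trick (the register and the RW_BITS layout are invented; this demo prints the writable set, the complement of what the kernel stores):

#include <stdio.h>

#define PROBE_MASK	0xffffffffu	/* bits we are allowed to probe */
#define RW_BITS		0x0003ffffu	/* pretend only these are writable */

static unsigned int fcsr_hw = 0x00010000;	/* simulated register */

static void fcsr_write(unsigned int v)
{
	fcsr_hw = (fcsr_hw & ~RW_BITS) | (v & RW_BITS);
}

static unsigned int fcsr_read(void)
{
	return fcsr_hw;
}

int main(void)
{
	unsigned int saved = fcsr_read(), lo, hi;

	fcsr_write(0);			/* drive every probed bit low */
	lo = fcsr_read();
	fcsr_write(PROBE_MASK);		/* drive every probed bit high */
	hi = fcsr_read();
	fcsr_write(saved);		/* restore the original value */
	printf("writable bits: 0x%08x\n", (lo ^ hi) & PROBE_MASK);
	return 0;
}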
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (c) 2004 MIPS Inc
* Author: [email protected]
*
* Copyright (C) 2004, 06 Ralf Baechle <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/msc01_ic.h>
#include <asm/traps.h>
static unsigned long _icctrl_msc;
#define MSC01_IC_REG_BASE _icctrl_msc
#define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0)
#define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0)
static unsigned int irq_base;
/* mask off an interrupt */
static inline void mask_msc_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
else
MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32));
}
/* unmask an interrupt */
static inline void unmask_msc_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
else
MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32));
}
/*
 * Masks and ACKs a level-triggered IRQ
*/
static void level_mask_and_ack_msc_irq(struct irq_data *d)
{
mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
}
/*
 * Masks and ACKs an edge-triggered IRQ
*/
static void edge_mask_and_ack_msc_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
else {
u32 r;
MSCIC_READ(MSC01_IC_SUP+irq*8, r);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
}
/*
* Interrupt handler for interrupts coming from SOC-it.
*/
void ll_msc_irq(void)
{
unsigned int irq;
/* read the interrupt vector register */
MSCIC_READ(MSC01_IC_VEC, irq);
if (irq < 64)
do_IRQ(irq + irq_base);
else {
/* Ignore spurious interrupt */
}
}
static void msc_bind_eic_interrupt(int irq, int set)
{
MSCIC_WRITE(MSC01_IC_RAMW,
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
}
static struct irq_chip msc_levelirq_type = {
.name = "SOC-it-Level",
.irq_ack = level_mask_and_ack_msc_irq,
.irq_mask = mask_msc_irq,
.irq_mask_ack = level_mask_and_ack_msc_irq,
.irq_unmask = unmask_msc_irq,
.irq_eoi = unmask_msc_irq,
};
static struct irq_chip msc_edgeirq_type = {
.name = "SOC-it-Edge",
.irq_ack = edge_mask_and_ack_msc_irq,
.irq_mask = mask_msc_irq,
.irq_mask_ack = edge_mask_and_ack_msc_irq,
.irq_unmask = unmask_msc_irq,
.irq_eoi = unmask_msc_irq,
};
void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
{
_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
/* Reset interrupt controller - initialises all registers to 0 */
MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
irq_set_chip_and_handler_name(irqbase + n,
&msc_edgeirq_type,
handle_edge_irq,
"edge");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
irq_set_chip_and_handler_name(irqbase + n,
&msc_levelirq_type,
handle_level_irq,
"level");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
}
}
irq_base = irqbase;
MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */
}
| linux-master | arch/mips/kernel/irq-msc01.c |
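mask_msc_irq()/unmask_msc_irq() above fan 64 interrupt sources out over two 32-bit registers: sources below irq_base + 32 hit the "L" register, the rest the "H" one, with the bit index taken modulo 32. The routing logic in isolation (the registers are plain variables here):

#include <stdio.h>

static unsigned int enal, enah;		/* stand-ins for MSC01_IC_ENAL/H */
static const unsigned int irq_base = 64;

static void msc_enable(unsigned int irq)
{
	if (irq < irq_base + 32)
		enal |= 1u << (irq - irq_base);		/* low word */
	else
		enah |= 1u << (irq - irq_base - 32);	/* high word */
}

int main(void)
{
	msc_enable(irq_base + 3);	/* bit 3 of the low register */
	msc_enable(irq_base + 40);	/* bit 8 of the high register */
	printf("ENAL=0x%08x ENAH=0x%08x\n", enal, enah);
	return 0;
}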
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DEC I/O ASIC's counter clocksource
*
* Copyright (C) 2008 Yoichi Yuasa <[email protected]>
*/
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/init.h>
#include <asm/ds1287.h>
#include <asm/time.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
static u64 dec_ioasic_hpt_read(struct clocksource *cs)
{
return ioasic_read(IO_REG_FCTR);
}
static struct clocksource clocksource_dec = {
.name = "dec-ioasic",
.read = dec_ioasic_hpt_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static u64 notrace dec_ioasic_read_sched_clock(void)
{
return ioasic_read(IO_REG_FCTR);
}
int __init dec_ioasic_clocksource_init(void)
{
unsigned int freq;
u32 start, end;
int i = HZ / 8;
ds1287_timer_state();
while (!ds1287_timer_state())
;
start = dec_ioasic_hpt_read(&clocksource_dec);
while (i--)
while (!ds1287_timer_state())
;
end = dec_ioasic_hpt_read(&clocksource_dec);
freq = (end - start) * 8;
/* An early revision of the I/O ASIC didn't have the counter. */
if (!freq)
return -ENXIO;
printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
clocksource_dec.rating = 200 + freq / 10000000;
clocksource_register_hz(&clocksource_dec, freq);
sched_clock_register(dec_ioasic_read_sched_clock, 32, freq);
return 0;
}
| linux-master | arch/mips/kernel/csrc-ioasic.c |
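The calibration loop in dec_ioasic_clocksource_init() counts free-running-counter ticks across HZ/8 reference periods, i.e. one eighth of a second, then multiplies by 8 to get Hz; unsigned 32-bit subtraction makes the result correct even if the counter wraps in between. Worked out with synthetic numbers:

#include <stdio.h>

int main(void)
{
	/* assumes a 32-bit unsigned int, matching the 32-bit FCTR counter */
	unsigned int start = 4294000000u;	/* first read, near wraparound */
	unsigned int end = start + 3145728u;	/* read 1/8 s later (wraps) */
	unsigned int freq = (end - start) * 8;	/* modular math absorbs the wrap */

	printf("I/O ASIC clock frequency %uHz\n", freq);	/* 25165824Hz */
	return 0;
}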
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2016, Imagination Technologies Ltd.
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/compat-signal.h>
#include <asm/dsp.h>
#include <asm/sim.h>
#include <asm/unistd.h>
#include "signal-common.h"
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
#define __NR_O32_restart_syscall 4253
struct sigframe32 {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext32 sf_sc;
compat_sigset_t sf_mask;
};
struct ucontext32 {
u32 uc_flags;
s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct rt_sigframe32 {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
compat_siginfo_t rs_info;
struct ucontext32 rs_uc;
};
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
int err = 0;
int i;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
err |= __put_user(mflo2(), &sc->sc_lo2);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
}
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
err |= protected_save_fp_context(sc);
return err;
}
static int restore_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
int err = 0;
s32 treg;
int i;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
return err ?: protected_restore_fp_context(sc);
}
static int setup_frame_32(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct sigframe32 __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof (*frame)))
return -EFAULT;
err |= setup_sigcontext32(regs, &frame->sf_sc);
err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to struct sigcontext
*
* $25 and c0_epc point to the signal handler, $29 points to the
* struct sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
asmlinkage void sys32_rt_sigreturn(void)
{
struct rt_sigframe32 __user *frame;
struct pt_regs *regs;
sigset_t set;
int sig;
regs = current_pt_regs();
frame = (struct rt_sigframe32 __user *)regs->regs[29];
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig);
if (compat_restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
: /* no outputs */
: "r" (regs));
/* Unreached */
badframe:
force_sig(SIGSEGV);
}
static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe32 __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof (*frame)))
return -EFAULT;
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe32.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
/*
* o32 compatibility on 64-bit kernels, without DSP ASE
*/
struct mips_abi mips_abi_32 = {
.setup_frame = setup_frame_32,
.setup_rt_frame = setup_rt_frame_32,
.restart = __NR_O32_restart_syscall,
.off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
.off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
.off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
.vdso = &vdso_image_o32,
};
asmlinkage void sys32_sigreturn(void)
{
struct sigframe32 __user *frame;
struct pt_regs *regs;
sigset_t blocked;
int sig;
regs = current_pt_regs();
frame = (struct sigframe32 __user *)regs->regs[29];
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
goto badframe;
set_current_blocked(&blocked);
sig = restore_sigcontext32(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
: /* no outputs */
: "r" (regs));
/* Unreached */
badframe:
force_sig(SIGSEGV);
}
| linux-master | arch/mips/kernel/signal_o32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Imagination Technologies
* Author: Alex Smith <[email protected]>
*/
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <asm/abi.h>
#include <asm/mips-cps.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
/* Kernel-provided data used by the VDSO. */
static union mips_vdso_data mips_vdso_data __page_aligned_data;
struct vdso_data *vdso_data = mips_vdso_data.data;
/*
* Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
* what we map and where within the area they are mapped is determined at
* runtime.
*/
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
.name = "[vvar]",
.pages = no_pages,
};
static void __init init_vdso_image(struct mips_vdso_image *image)
{
unsigned long num_pages, i;
unsigned long data_pfn;
BUG_ON(!PAGE_ALIGNED(image->data));
BUG_ON(!PAGE_ALIGNED(image->size));
num_pages = image->size / PAGE_SIZE;
data_pfn = __phys_to_pfn(__pa_symbol(image->data));
for (i = 0; i < num_pages; i++)
image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}
static int __init init_vdso(void)
{
init_vdso_image(&vdso_image);
#ifdef CONFIG_MIPS32_O32
init_vdso_image(&vdso_image_o32);
#endif
#ifdef CONFIG_MIPS32_N32
init_vdso_image(&vdso_image_n32);
#endif
return 0;
}
subsys_initcall(init_vdso);
static unsigned long vdso_base(void)
{
unsigned long base = STACK_TOP;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
/* Skip the delay slot emulation page */
base += PAGE_SIZE;
}
if (current->flags & PF_RANDOMIZE) {
base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
base = PAGE_ALIGN(base);
}
return base;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
struct vm_area_struct *vma;
int ret;
if (mmap_write_lock_killable(mm))
return -EINTR;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
/* Map delay slot emulation page */
base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
0, NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}
}
/*
* Determine total area size. This includes the VDSO data itself, the
* data page, and the GIC user page if present. Always create a mapping
* for the GIC user area if the GIC is present regardless of whether it
* is the current clocksource, in case it comes into use later on. We
* only map a page even though the total area is 64K, as we only need
* the counter registers at the start.
*/
gic_size = mips_gic_present() ? PAGE_SIZE : 0;
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;
/*
* Find a region that's large enough for us to perform the
* colour-matching alignment below.
*/
if (cpu_has_dc_aliases)
size += shm_align_mask + 1;
base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}
/*
* If we suffer from dcache aliasing, ensure that the VDSO data page
* mapping is coloured the same as the kernel's mapping of that memory.
* This ensures that when the kernel updates the VDSO data userland
* will observe it without requiring cache invalidations.
*/
if (cpu_has_dc_aliases) {
base = __ALIGN_MASK(base, shm_align_mask);
base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
}
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;
vma = _install_special_mapping(mm, base, vvar_size,
VM_READ | VM_MAYREAD,
&vdso_vvar_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
/* Map GIC user page. */
if (gic_size) {
gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
gic_pfn = PFN_DOWN(__pa(gic_base));
ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(vma->vm_page_prot));
if (ret)
goto out;
}
/* Map data page. */
ret = remap_pfn_range(vma, data_addr,
virt_to_phys(vdso_data) >> PAGE_SHIFT,
PAGE_SIZE, vma->vm_page_prot);
if (ret)
goto out;
/* Map VDSO image. */
vma = _install_special_mapping(mm, vdso_addr, image->size,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
&image->mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
mm->context.vdso = (void *)vdso_addr;
ret = 0;
out:
mmap_write_unlock(mm);
return ret;
}
| linux-master | arch/mips/kernel/vdso.c |
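The colour-matching step in arch_setup_additional_pages() first aligns the candidate base to the alias granule and then adds the kernel address's colour bits, so that data_addr = base + gic_size lands in the same dcache colour as vdso_data. The arithmetic with made-up addresses and alias mask (__ALIGN_MASK mirrors the kernel macro of the same name):

#include <stdio.h>

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned long shm_align_mask = 0x7fff;	   /* 32 KiB aliasing */
	unsigned long vdso_data_addr = 0x8123a000; /* kernel view of the data page */
	unsigned long gic_size = 0x1000;
	unsigned long base = 0x77e31000;	   /* candidate from get_unmapped_area() */

	base = __ALIGN_MASK(base, shm_align_mask);
	base += (vdso_data_addr - gic_size) & shm_align_mask;
	printf("colour-matched base: 0x%lx\n", base);
	printf("colours match: %s\n",
	       ((base + gic_size) & shm_align_mask) ==
	       (vdso_data_addr & shm_align_mask) ? "yes" : "no");
	return 0;
}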
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/uio.h>
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;
if (!csize)
return 0;
vaddr = kmap_local_pfn(pfn);
csize = copy_to_iter(vaddr + offset, csize, iter);
kunmap_local(vaddr);
return csize;
}
| linux-master | arch/mips/kernel/crash_dump.c |
// SPDX-License-Identifier: GPL-2.0
/*
* General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/security.h>
#include <linux/types.h>
#include <linux/uaccess.h>
/*
* CPU mask used to set process affinity for MT VPEs/TCs with FPUs
*/
cpumask_t mt_fpu_cpumask;
static int fpaff_threshold = -1;
unsigned long mt_fpemul_threshold;
/*
* Replacement functions for the sys_sched_setaffinity() and
* sys_sched_getaffinity() system calls, so that we can integrate
* FPU affinity with the user's requested processor affinity.
* This code is 98% identical with the sys_sched_setaffinity()
* and sys_sched_getaffinity() system calls, and should be
* updated when kernel/sched/core.c changes.
*/
/*
* find_process_by_pid - find a process with a matching PID value.
* used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
* cloned here.
*/
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_vpid(pid) : current;
}
/*
* check the target process has a UID that matches the current process's
*/
static bool check_same_owner(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred;
bool match;
rcu_read_lock();
pcred = __task_cred(p);
match = (uid_eq(cred->euid, pcred->euid) ||
uid_eq(cred->euid, pcred->uid));
rcu_read_unlock();
return match;
}
/*
* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
cpumask_var_t cpus_allowed, new_mask, effective_mask;
struct thread_info *ti;
struct task_struct *p;
int retval;
if (len < sizeof(new_mask))
return -EINVAL;
if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
return -EFAULT;
cpus_read_lock();
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
rcu_read_unlock();
cpus_read_unlock();
return -ESRCH;
}
/* Prevent p going away */
get_task_struct(p);
rcu_read_unlock();
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_put_task;
}
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_cpus_allowed;
}
if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_new_mask;
}
if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) {
retval = -EPERM;
goto out_unlock;
}
retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
/* Record new user-specified CPU set for future reference */
cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
again:
/* Compute new global allowed CPU set if necessary */
ti = task_thread_info(p);
if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
retval = set_cpus_allowed_ptr(p, effective_mask);
} else {
cpumask_copy(effective_mask, new_mask);
clear_ti_thread_flag(ti, TIF_FPUBOUND);
retval = set_cpus_allowed_ptr(p, new_mask);
}
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
if (!cpumask_subset(effective_mask, cpus_allowed)) {
/*
* We must have raced with a concurrent cpuset
* update. Just reset the cpus_allowed to the
* cpuset's cpus_allowed
*/
cpumask_copy(new_mask, cpus_allowed);
goto again;
}
}
out_unlock:
free_cpumask_var(effective_mask);
out_free_new_mask:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
out_put_task:
put_task_struct(p);
cpus_read_unlock();
return retval;
}
/*
* mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
cpumask_t allowed, mask;
int retval;
struct task_struct *p;
real_len = sizeof(mask);
if (len < real_len)
return -EINVAL;
cpus_read_lock();
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
rcu_read_unlock();
cpus_read_unlock();
if (retval)
return retval;
if (copy_to_user(user_mask_ptr, &mask, real_len))
return -EFAULT;
return real_len;
}
static int __init fpaff_thresh(char *str)
{
get_option(&str, &fpaff_threshold);
return 1;
}
__setup("fpaff=", fpaff_thresh);
/*
* FPU Use Factor empirically derived from experiments on 34K
*/
#define FPUSEFACTOR 2000
static __init int mt_fp_affinity_init(void)
{
if (fpaff_threshold >= 0) {
mt_fpemul_threshold = fpaff_threshold;
} else {
mt_fpemul_threshold =
(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
}
printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
mt_fpemul_threshold);
return 0;
}
arch_initcall(mt_fp_affinity_init);
| linux-master | arch/mips/kernel/mips-mt-fpaff.c |
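mt_fp_affinity_init() above derives the default emulation threshold from loops_per_jiffy when no fpaff= override is given. The same formula evaluated with an invented loops_per_jiffy (about 298 BogoMIPS, yielding 5960 emulations):

#include <stdio.h>

#define HZ		100
#define FPUSEFACTOR	2000	/* empirically derived on the 34K */

int main(void)
{
	unsigned long loops_per_jiffy = 1490944;	/* invented value */
	unsigned long threshold =
		(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;

	printf("FPU Affinity set after %lu emulations\n", threshold);
	return 0;
}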
// SPDX-License-Identifier: GPL-2.0
/*
* General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/security.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
int vpelimit;
static int __init maxvpes(char *str)
{
get_option(&str, &vpelimit);
return 1;
}
__setup("maxvpes=", maxvpes);
int tclimit;
static int __init maxtcs(char *str)
{
get_option(&str, &tclimit);
return 1;
}
__setup("maxtcs=", maxtcs);
/*
* Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
*/
void mips_mt_regdump(unsigned long mvpctl)
{
unsigned long flags;
unsigned long vpflags;
unsigned long mvpconf0;
int nvpe;
int ntc;
int i;
int tc;
unsigned long haltval;
unsigned long tcstatval;
local_irq_save(flags);
vpflags = dvpe();
printk("=== MIPS MT State Dump ===\n");
printk("-- Global State --\n");
printk(" MVPControl Passed: %08lx\n", mvpctl);
printk(" MVPControl Read: %08lx\n", vpflags);
printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
printk("-- per-VPE State --\n");
for (i = 0; i < nvpe; i++) {
for (tc = 0; tc < ntc; tc++) {
settc(tc);
if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
printk(" VPE %d\n", i);
printk(" VPEControl : %08lx\n",
read_vpe_c0_vpecontrol());
printk(" VPEConf0 : %08lx\n",
read_vpe_c0_vpeconf0());
printk(" VPE%d.Status : %08lx\n",
i, read_vpe_c0_status());
printk(" VPE%d.EPC : %08lx %pS\n",
i, read_vpe_c0_epc(),
(void *) read_vpe_c0_epc());
printk(" VPE%d.Cause : %08lx\n",
i, read_vpe_c0_cause());
printk(" VPE%d.Config7 : %08lx\n",
i, read_vpe_c0_config7());
break; /* Next VPE */
}
}
}
printk("-- per-TC State --\n");
for (tc = 0; tc < ntc; tc++) {
settc(tc);
if (read_tc_c0_tcbind() == read_c0_tcbind()) {
/* Are we dumping ourself? */
haltval = 0; /* Then we're not halted, and mustn't be */
tcstatval = flags; /* And pre-dump TCStatus is flags */
printk(" TC %d (current TC with VPE EPC above)\n", tc);
} else {
haltval = read_tc_c0_tchalt();
write_tc_c0_tchalt(1);
tcstatval = read_tc_c0_tcstatus();
printk(" TC %d\n", tc);
}
printk(" TCStatus : %08lx\n", tcstatval);
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
printk(" TCRestart : %08lx %pS\n",
read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
printk(" TCHalt : %08lx\n", haltval);
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
if (!haltval)
write_tc_c0_tchalt(0);
}
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
}
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
static int __init rpsctl_set(char *str)
{
get_option(&str, &mt_opt_rpsctl);
return 1;
}
__setup("rpsctl=", rpsctl_set);
static int __init nblsu_set(char *str)
{
get_option(&str, &mt_opt_nblsu);
return 1;
}
__setup("nblsu=", nblsu_set);
static int __init config7_set(char *str)
{
get_option(&str, &mt_opt_config7);
mt_opt_forceconfig7 = 1;
return 1;
}
__setup("config7=", config7_set);
static unsigned int itc_base;
static int __init set_itc_base(char *str)
{
get_option(&str, &itc_base);
return 1;
}
__setup("itcbase=", set_itc_base);
void mips_mt_set_cpuoptions(void)
{
unsigned int oconfig7 = read_c0_config7();
unsigned int nconfig7 = oconfig7;
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
mt_opt_rpsctl);
if (mt_opt_rpsctl)
nconfig7 |= (1 << 2);
else
nconfig7 &= ~(1 << 2);
}
if (mt_opt_nblsu >= 0) {
printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
if (mt_opt_nblsu)
nconfig7 |= (1 << 5);
else
nconfig7 &= ~(1 << 5);
}
if (mt_opt_forceconfig7) {
printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
nconfig7 = mt_opt_config7;
}
if (oconfig7 != nconfig7) {
__asm__ __volatile("sync");
write_c0_config7(nconfig7);
ehb();
printk("Config7: 0x%08x\n", read_c0_config7());
}
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
* specific to the 34K core family, which uses
* a special mode bit ("ITC") in the ErrCtl
* register to enable access to ITC control
* registers via cache "tag" operations.
*/
unsigned long ectlval;
unsigned long itcblkgrn;
/* ErrCtl register is known as "ecc" to Linux */
ectlval = read_c0_ecc();
write_c0_ecc(ectlval | (0x1 << 26));
ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
/* Read "cache tag" for Dcache pseudo-index 8 */
cache_op(Index_Load_Tag_D, INDEX_8);
ehb();
itcblkgrn = read_c0_dtaglo();
itcblkgrn &= 0xfffe0000;
/* Set for 128 byte pitch of ITC cells */
itcblkgrn |= 0x00000c00;
/* Stage in Tag register */
write_c0_dtaglo(itcblkgrn);
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_8);
/* Now set base address, and turn ITC on with 0x1 bit */
write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_0);
write_c0_ecc(ectlval);
ehb();
printk("Mapped %ld ITC cells starting at 0x%08x\n",
((itcblkgrn & 0x7fe00000) >> 20), itc_base);
}
}
struct class *mt_class;
static int __init mips_mt_init(void)
{
struct class *mtc;
mtc = class_create("mt");
if (IS_ERR(mtc))
return PTR_ERR(mtc);
mt_class = mtc;
return 0;
}
subsys_initcall(mips_mt_init);
| linux-master | arch/mips/kernel/mips-mt.c |
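mips_mt_set_cpuoptions() above applies the rpsctl=/nblsu= overrides as a classic read-modify-write: build the new Config7 image from the tri-state options (negative means untouched) and write back only on change. The decision logic in isolation, with a simulated register:

#include <stdio.h>

static unsigned int config7 = 0x80000000;	/* simulated current value */

int main(void)
{
	int opt_rpsctl = 1, opt_nblsu = -1;	/* negative: leave bit alone */
	unsigned int nconfig7 = config7;

	if (opt_rpsctl >= 0) {
		if (opt_rpsctl)
			nconfig7 |= 1u << 2;
		else
			nconfig7 &= ~(1u << 2);
	}
	if (opt_nblsu >= 0) {
		if (opt_nblsu)
			nconfig7 |= 1u << 5;
		else
			nconfig7 &= ~(1u << 5);
	}
	if (nconfig7 != config7) {		/* write back only on change */
		config7 = nconfig7;
		printf("Config7: 0x%08x\n", config7);
	}
	return 0;
}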
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki
*/
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>
#include <asm/bugs.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>
static char bug64hit[] __initdata =
"reliable operation impossible!\n%s";
static char nowar[] __initdata =
"Please report to <[email protected]>.";
static char r4kwar[] __initdata =
"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
"Enable CPU_DADDI_WORKAROUNDS to rectify.";
static __always_inline __init
void align_mod(const int align, const int mod)
{
asm volatile(
".set push\n\t"
".set noreorder\n\t"
".balign %0\n\t"
".rept %1\n\t"
"nop\n\t"
".endr\n\t"
".set pop"
:
: "n"(align), "n"(mod));
}
static __always_inline __init
void mult_sh_align_mod(long *v1, long *v2, long *w,
const int align, const int mod)
{
unsigned long flags;
int m1, m2;
long p, s, lv1, lv2, lw;
/*
* We want the multiply and the shift to be isolated from the
* rest of the code to disable gcc optimizations. Hence the
* asm statements that execute nothing, but make gcc not know
* what the values of m1, m2 and s are and what lv2 and p are
* used for.
*/
local_irq_save(flags);
/*
* The following code leads to a wrong result of the first
* dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
* 00000422 or 00000430, respectively).
*
* See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
* 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
* details. I got no permission to duplicate them here,
* sigh... --macro
*/
asm volatile(
""
: "=r" (m1), "=r" (m2), "=r" (s)
: "0" (5), "1" (8), "2" (5));
align_mod(align, mod);
/*
* The trailing nop is needed to fulfill the two-instruction
	 * requirement between reading hi/lo and starting a mult/div.
	 * Leaving it out may cause gas to insert a nop itself, breaking
* the desired alignment of the next chunk.
*/
asm volatile(
".set push\n\t"
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
"mult %2, %3\n\t"
"dsll32 %0, %4, %5\n\t"
"mflo $0\n\t"
"dsll32 %1, %4, %5\n\t"
"nop\n\t"
".set pop"
: "=&r" (lv1), "=r" (lw)
: "r" (m1), "r" (m2), "r" (s), "I" (0)
: "hi", "lo", "$0");
/* We have to use single integers for m1 and m2 and a double
* one for p to be sure the mulsidi3 gcc's RTL multiplication
* instruction has the workaround applied. Older versions of
* gcc have correct umulsi3 and mulsi3, but other
* multiplication variants lack the workaround.
*/
asm volatile(
""
: "=r" (m1), "=r" (m2), "=r" (s)
: "0" (m1), "1" (m2), "2" (s));
align_mod(align, mod);
p = m1 * m2;
lv2 = s << 32;
asm volatile(
""
: "=r" (lv2)
: "0" (lv2), "r" (p));
local_irq_restore(flags);
*v1 = lv1;
*v2 = lv2;
*w = lw;
}
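/*
 * A minimal sketch of the empty-asm barrier used above (variable names
 * invented for illustration): an asm with matching input/output
 * constraints emits no instructions, yet forces gcc to treat the value
 * as unknown afterwards, so no constant folding can occur across it.
 *
 *	int x = 5;
 *	asm volatile("" : "=r" (x) : "0" (x));
 *	// below this point gcc can no longer assume x == 5
 *	long p = (long)x * 7;
 */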
static __always_inline __init void check_mult_sh(void)
{
long v1[8], v2[8], w[8];
int bug, fix, i;
printk("Checking for the multiply/shift bug... ");
/*
* Testing discovered false negatives for certain code offsets
* into cache lines. Hence we test all possible offsets for
* the worst assumption of an R4000 I-cache line width of 32
* bytes.
*
* We can't use a loop as alignment directives need to be
* immediates.
*/
mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);
bug = 0;
for (i = 0; i < 8; i++)
if (v1[i] != w[i])
bug = 1;
if (bug == 0) {
pr_cont("no.\n");
return;
}
pr_cont("yes, workaround... ");
fix = 1;
for (i = 0; i < 8; i++)
if (v2[i] != w[i])
fix = 0;
if (fix == 1) {
pr_cont("yes.\n");
return;
}
pr_cont("no.\n");
panic(bug64hit,
IS_ENABLED(CONFIG_CPU_R4000_WORKAROUNDS) ? nowar : r4kwar);
}
static volatile int daddi_ov;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
prev_state = exception_enter();
daddi_ov = 1;
regs->cp0_epc += 4;
exception_exit(prev_state);
}
static __init void check_daddi(void)
{
extern asmlinkage void handle_daddi_ov(void);
unsigned long flags;
void *handler;
long v, tmp;
printk("Checking for the daddi bug... ");
local_irq_save(flags);
handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
/*
* The following code fails to trigger an overflow exception
* when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
* 00000430, respectively).
*
* See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
* 3.0" by MIPS Technologies, Inc., erratum #23 for details.
* I got no permission to duplicate it here, sigh... --macro
*/
asm volatile(
".set push\n\t"
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
"addiu %1, $0, %2\n\t"
"dsrl %1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
"daddi %0, %1, %3\n\t"
".set pop"
: "=r" (v), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(EXCCODE_OV, handler);
local_irq_restore(flags);
if (daddi_ov) {
pr_cont("no.\n");
return;
}
pr_cont("yes, workaround... ");
local_irq_save(flags);
handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
asm volatile(
"addiu %1, $0, %2\n\t"
"dsrl %1, %1, 1\n\t"
"daddi %0, %1, %3"
: "=r" (v), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(EXCCODE_OV, handler);
local_irq_restore(flags);
if (daddi_ov) {
pr_cont("yes.\n");
return;
}
pr_cont("no.\n");
panic(bug64hit,
IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
}
int daddiu_bug = -1;
static __init void check_daddiu(void)
{
long v, w, tmp;
printk("Checking for the daddiu bug... ");
/*
* The following code leads to a wrong result of daddiu when
* executed on R4400 rev. 1.0 (PRId 00000440).
*
* See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
* MIPS Technologies, Inc., erratum #7 for details.
*
* According to "MIPS R4000PC/SC Errata, Processor Revision
* 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
* problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
* 00000430, respectively), too. Testing failed to trigger it
* so far.
*
* I got no permission to duplicate the errata here, sigh...
* --macro
*/
asm volatile(
".set push\n\t"
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
"daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2\n\t"
".set pop"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
daddiu_bug = v != w;
if (!daddiu_bug) {
pr_cont("no.\n");
return;
}
pr_cont("yes, workaround... ");
asm volatile(
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
"daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
if (v == w) {
pr_cont("yes.\n");
return;
}
pr_cont("no.\n");
panic(bug64hit,
IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
}
void __init check_bugs64_early(void)
{
check_mult_sh();
check_daddiu();
}
void __init check_bugs64(void)
{
check_daddi();
}
| linux-master | arch/mips/kernel/r4k-bugs64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 8250/16550-type serial ports prom_putchar()
*
* Copyright (C) 2010 Yoichi Yuasa <[email protected]>
*/
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <asm/setup.h>
static void __iomem *serial8250_base;
static unsigned int serial8250_reg_shift;
static unsigned int serial8250_tx_timeout;
void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
unsigned int timeout)
{
serial8250_base = (void __iomem *)base;
serial8250_reg_shift = reg_shift;
serial8250_tx_timeout = timeout;
}
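/*
 * A hypothetical caller, for illustration only (the base address and
 * parameters below are invented, not taken from any real board): a
 * platform's early setup code would map its 16550 and register it here
 * before the first prom_putchar() call.
 *
 *	void __init plat_setup_early_console(void)
 *	{
 *		// uncached KSEG1 mapping, registers 8 bytes apart
 *		setup_8250_early_printk_port(CKSEG1ADDR(0x1f000900), 3,
 *					     50000);
 *	}
 */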
static inline u8 serial_in(int offset)
{
return readb(serial8250_base + (offset << serial8250_reg_shift));
}
static inline void serial_out(int offset, char value)
{
writeb(value, serial8250_base + (offset << serial8250_reg_shift));
}
void prom_putchar(char c)
{
unsigned int timeout;
int status, bits;
if (!serial8250_base)
return;
timeout = serial8250_tx_timeout;
bits = UART_LSR_TEMT | UART_LSR_THRE;
do {
status = serial_in(UART_LSR);
if (--timeout == 0)
break;
} while ((status & bits) != bits);
if (timeout)
serial_out(UART_TX, c);
}
| linux-master | arch/mips/kernel/early_printk_8250.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/sched/task_stack.h>
#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sync.h>
#include <asm/sysmips.h>
#include <asm/switch_to.h>
/*
* For historic reasons the pipe(2) syscall on MIPS has an unusual calling
* convention. It returns results in registers $v0 / $v1 which means there
* is no need for it to verify the validity of a userspace pointer
* argument. Historically that used to be expensive in Linux. These days
* the performance advantage is negligible.
*/
asmlinkage int sysm_pipe(void)
{
int fd[2];
int error = do_pipe_flags(fd, 0);
if (error)
return error;
current_pt_regs()->regs[3] = fd[1];
return fd[0];
}
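/*
 * Concretely: the read end travels back in $v0 (regs[2], the ordinary
 * syscall result) and the write end in $v1 (regs[3], set above), with
 * $a3 flagging errors as usual for MIPS syscalls.  A hedged sketch of
 * the userspace side, in C terms rather than any particular libc's
 * assembly:
 *
 *	if (!a3) {		// a3 == 0 means success
 *		fd[0] = v0;	// read end
 *		fd[1] = v1;	// write end
 *	}
 */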
SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long,
fd, off_t, offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> PAGE_SHIFT);
}
SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, pgoff)
{
if (pgoff & (~PAGE_MASK >> 12))
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
pgoff >> (PAGE_SHIFT - 12));
}
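/*
 * mmap2 takes its offset in units of 4096 bytes regardless of the
 * kernel's page size, hence the rescaling above.  A worked example,
 * assuming a hypothetical 16K-page kernel (PAGE_SHIFT == 14): a byte
 * offset of 0x10000 arrives as pgoff == 0x10, passes the check since
 * its low two bits (~PAGE_MASK >> 12 == 0x3) are clear, and is
 * rescaled to 0x10 >> (14 - 12) == 4 kernel pages.
 */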
save_static_function(sys_fork);
save_static_function(sys_clone);
save_static_function(sys_clone3);
SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
struct thread_info *ti = task_thread_info(current);
ti->tp_value = addr;
if (cpu_has_userlocal)
write_c0_userlocal(addr);
return 0;
}
static inline int mips_atomic_set(unsigned long addr, unsigned long new)
{
unsigned long old, tmp;
struct pt_regs *regs;
unsigned int err;
if (unlikely(addr & 3))
return -EINVAL;
if (unlikely(!access_ok((const void __user *)addr, 4)))
return -EINVAL;
if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
__asm__ __volatile__ (
" .set push \n"
" .set arch=r4000 \n"
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
"2: sc %[tmp], (%[addr]) \n"
" beqzl %[tmp], 1b \n"
"3: \n"
" .insn \n"
" .section .fixup,\"ax\" \n"
"4: li %[err], %[efault] \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "STR(PTR_WD)" 1b, 4b \n"
" "STR(PTR_WD)" 2b, 4b \n"
" .previous \n"
" .set pop \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
: [addr] "r" (addr),
[new] "r" (new),
[efault] "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__ (
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
"1: \n"
" " __SYNC(full, loongson3_war) " \n"
user_ll("%[old]", "(%[addr])")
" move %[tmp], %[new] \n"
"2: \n"
user_sc("%[tmp]", "(%[addr])")
" beqz %[tmp], 1b \n"
"3: \n"
" .insn \n"
" .section .fixup,\"ax\" \n"
"5: li %[err], %[efault] \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "STR(PTR_WD)" 1b, 5b \n"
" "STR(PTR_WD)" 2b, 5b \n"
" .previous \n"
" .set pop \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
: [addr] "r" (addr),
[new] "r" (new),
[efault] "i" (-EFAULT)
: "memory");
} else {
do {
preempt_disable();
ll_bit = 1;
ll_task = current;
preempt_enable();
err = __get_user(old, (unsigned int *) addr);
err |= __put_user(new, (unsigned int *) addr);
if (err)
break;
rmb();
} while (!ll_bit);
}
if (unlikely(err))
return err;
regs = current_pt_regs();
regs->regs[2] = old;
regs->regs[7] = 0; /* No error */
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
" move $29, %0 \n"
" j syscall_exit \n"
: /* no outputs */
: "r" (regs));
/* unreached. Honestly. */
unreachable();
}
/*
* mips_atomic_set() normally returns directly via syscall_exit potentially
* clobbering static registers, so be sure to preserve them.
*/
save_static_function(sys_sysmips);
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
case MIPS_ATOMIC_SET:
return mips_atomic_set(arg1, arg2);
case MIPS_FIXADE:
if (arg1 & ~3)
return -EINVAL;
if (arg1 & 1)
set_thread_flag(TIF_FIXADE);
else
clear_thread_flag(TIF_FIXADE);
if (arg1 & 2)
set_thread_flag(TIF_LOGADE);
else
clear_thread_flag(TIF_LOGADE);
return 0;
case FLUSH_CACHE:
__flush_cache_all();
return 0;
}
return -EINVAL;
}
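/*
 * For illustration, MIPS_ATOMIC_SET gives pre-LL/SC userspace an
 * atomic word exchange.  A hypothetical wrapper (names invented here,
 * not part of any header) might look like:
 *
 *	static long atomic_xchg_word(unsigned long *p, unsigned long val)
 *	{
 *		// returns the old value, or a negative errno
 *		return syscall(SYS_sysmips, MIPS_ATOMIC_SET,
 *			       (long)p, (long)val);
 *	}
 */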
/*
* Not implemented yet ...
*/
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
return -ENOSYS;
}
| linux-master | arch/mips/kernel/syscall.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 06 by Ralf Baechle ([email protected])
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <asm/compiler.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/reboot.h>
/*
* Urgs ... Too many MIPS machines to handle this in a generic way.
* So handle all using function pointers to machine specific
* functions.
*/
void (*_machine_restart)(char *command);
void (*_machine_halt)(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
static void machine_hang(void)
{
/*
* We're hanging the system so we don't want to be interrupted anymore.
* Any interrupt handlers that ran would at best be useless & at worst
* go awry because the system isn't in a functional state.
*/
local_irq_disable();
/*
* Mask all interrupts, giving us a better chance of remaining in the
* low power wait state.
*/
clear_c0_status(ST0_IM);
while (true) {
if (cpu_has_mips_r) {
/*
* We know that the wait instruction is supported so
* make use of it directly, leaving interrupts
* disabled.
*/
asm volatile(
".set push\n\t"
".set " MIPS_ISA_ARCH_LEVEL "\n\t"
"wait\n\t"
".set pop");
} else if (cpu_wait) {
/*
* Try the cpu_wait() callback. This isn't ideal since
* it'll re-enable interrupts, but that ought to be
* harmless given that they're all masked.
*/
cpu_wait();
local_irq_disable();
} else {
/*
* We're going to burn some power running round the
* loop, but we don't really have a choice. This isn't
* a path we should expect to run for long during
* typical use anyway.
*/
}
/*
* In most modern MIPS CPUs interrupts will cause the wait
* instruction to graduate even when disabled, and in some
* cases even when masked. In order to prevent a timer
* interrupt from continuously taking us out of the low power
* wait state, we clear any pending timer interrupt here.
*/
if (cpu_has_counter)
write_c0_compare(0);
}
}
void machine_restart(char *command)
{
if (_machine_restart)
_machine_restart(command);
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
do_kernel_restart(command);
mdelay(1000);
pr_emerg("Reboot failed -- System halted\n");
machine_hang();
}
void machine_halt(void)
{
if (_machine_halt)
_machine_halt();
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
machine_hang();
}
void machine_power_off(void)
{
do_kernel_power_off();
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
machine_hang();
}
| linux-master | arch/mips/kernel/reset.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/debug.h>
static int ss_get(void *data, u64 *val)
{
ktime_t start, finish;
int loops;
int cont;
DEFINE_RAW_SPINLOCK(ss_spin);
loops = 1000000;
cont = 1;
start = ktime_get();
while (cont) {
raw_spin_lock(&ss_spin);
loops--;
if (loops == 0)
cont = 0;
raw_spin_unlock(&ss_spin);
}
finish = ktime_get();
*val = ktime_us_delta(finish, start);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
struct spin_multi_state {
raw_spinlock_t lock;
atomic_t start_wait;
atomic_t enter_wait;
atomic_t exit_wait;
int loops;
};
struct spin_multi_per_thread {
struct spin_multi_state *state;
ktime_t start;
};
static int multi_other(void *data)
{
int loops;
int cont;
struct spin_multi_per_thread *pt = data;
struct spin_multi_state *s = pt->state;
loops = s->loops;
cont = 1;
atomic_dec(&s->enter_wait);
while (atomic_read(&s->enter_wait))
; /* spin */
pt->start = ktime_get();
atomic_dec(&s->start_wait);
while (atomic_read(&s->start_wait))
; /* spin */
while (cont) {
raw_spin_lock(&s->lock);
loops--;
if (loops == 0)
cont = 0;
raw_spin_unlock(&s->lock);
}
atomic_dec(&s->exit_wait);
while (atomic_read(&s->exit_wait))
; /* spin */
return 0;
}
static int multi_get(void *data, u64 *val)
{
ktime_t finish;
struct spin_multi_state ms;
struct spin_multi_per_thread t1, t2;
ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
ms.loops = 1000000;
atomic_set(&ms.start_wait, 2);
atomic_set(&ms.enter_wait, 2);
atomic_set(&ms.exit_wait, 2);
t1.state = &ms;
t2.state = &ms;
kthread_run(multi_other, &t2, "multi_get");
multi_other(&t1);
finish = ktime_get();
*val = ktime_us_delta(finish, t1.start);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
static int __init spinlock_test(void)
{
debugfs_create_file_unsafe("spin_single", S_IRUGO, mips_debugfs_dir, NULL,
&fops_ss);
debugfs_create_file_unsafe("spin_multi", S_IRUGO, mips_debugfs_dir, NULL,
&fops_multi);
return 0;
}
device_initcall(spinlock_test);
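/*
 * Reading either file runs the corresponding benchmark and reports the
 * elapsed time in microseconds.  A minimal userspace sketch, assuming
 * debugfs is mounted at /sys/kernel/debug:
 *
 *	char buf[32];
 *	FILE *f = fopen("/sys/kernel/debug/mips/spin_single", "r");
 *
 *	if (f && fgets(buf, sizeof(buf), f))
 *		printf("1M lock/unlock pairs took %s us\n", buf);
 */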
| linux-master | arch/mips/kernel/spinlock_test.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1995 Waldorf Electronics
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
* Copyright (C) 1996 Stoned Elipot
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/dmi.h>
#include <linux/crash_dump.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>
#include <asm/fw/fw.h>
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_data);
#ifdef CONFIG_VT
struct screen_info screen_info;
#endif
/*
* Setup information
*
* These are initialized so they are in the .data section
*/
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
EXPORT_SYMBOL(mips_machtype);
static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
#else
static const char builtin_cmdline[] __initconst = "";
#endif
/*
* mips_io_port_base is the begin of the address space to which x86 style
* I/O ports are mapped.
*/
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };
unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);
static void *detect_magic __initdata = detect_memory_region;
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
void *dm = &detect_magic;
phys_addr_t size;
for (size = sz_min; size < sz_max; size <<= 1) {
if (!memcmp(dm, dm + size, sizeof(detect_magic)))
break;
}
pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
((unsigned long long) size) / SZ_1M,
(unsigned long long) start,
((unsigned long long) sz_min) / SZ_1M,
((unsigned long long) sz_max) / SZ_1M);
memblock_add(start, size);
}
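/*
 * The probe above relies on partial address decoding: with, say, 64MB
 * fitted, a read at start + 64MB aliases back to start, so comparing
 * the magic value at dm against the copy seen at dm + size finds the
 * first power-of-two size at which the alias appears.  Worked example,
 * assuming sz_min = 32MB and 64MB actually fitted: the 32MB probe
 * reads distinct memory and differs, the 64MB probe aliases and
 * matches, so 64MB is registered with memblock.
 */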
/*
* Manage initrd
*/
#ifdef CONFIG_BLK_DEV_INITRD
static int __init rd_start_early(char *p)
{
unsigned long start = memparse(p, &p);
#ifdef CONFIG_64BIT
/* Guess if the sign extension was forgotten by bootloader */
if (start < XKPHYS)
start = (int)start;
#endif
initrd_start = start;
initrd_end += start;
return 0;
}
early_param("rd_start", rd_start_early);
static int __init rd_size_early(char *p)
{
initrd_end += memparse(p, &p);
return 0;
}
early_param("rd_size", rd_size_early);
/* Returns the next free pfn after the initrd */
static unsigned long __init init_initrd(void)
{
unsigned long end;
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
* perform sanity checks and use them if all looks good.
*/
if (!initrd_start || initrd_end <= initrd_start)
goto disable;
if (initrd_start & ~PAGE_MASK) {
pr_err("initrd start must be page aligned\n");
goto disable;
}
/*
* Sanitize initrd addresses. For example firmware
* can't guess whether it needs to pass them through
* as 64-bit values if the kernel has been built pure
* 32-bit. We also need to switch from KSEG0 to XKPHYS
* addresses now, so the code can safely use __pa().
*/
end = __pa(initrd_end);
initrd_end = (unsigned long)__va(end);
initrd_start = (unsigned long)__va(__pa(initrd_start));
if (initrd_start < PAGE_OFFSET) {
pr_err("initrd start < PAGE_OFFSET\n");
goto disable;
}
ROOT_DEV = Root_RAM0;
return PFN_UP(end);
disable:
initrd_start = 0;
initrd_end = 0;
return 0;
}
/* In some conditions (e.g. big endian bootloader with a little endian
kernel), the initrd might appear byte swapped. Try to detect this and
byte swap it if needed. */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
u64 buf;
/* Check for CPIO signature */
if (!memcmp((void *)initrd_start, "070701", 6))
return;
/* Check for compressed initrd */
if (decompress_method((unsigned char *)initrd_start, 8, NULL))
return;
/* Try again with a byte swapped header */
buf = swab64p((u64 *)initrd_start);
if (!memcmp(&buf, "070701", 6) ||
decompress_method((unsigned char *)(&buf), 8, NULL)) {
unsigned long i;
pr_info("Byteswapped initrd detected\n");
for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
swab64s((u64 *)i);
}
#endif
}
static void __init finalize_initrd(void)
{
unsigned long size = initrd_end - initrd_start;
if (size == 0) {
printk(KERN_INFO "Initrd not found or empty");
goto disable;
}
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
printk(KERN_ERR "Initrd extends beyond end of memory");
goto disable;
}
maybe_bswap_initrd();
memblock_reserve(__pa(initrd_start), size);
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
initrd_start, size);
return;
disable:
printk(KERN_CONT " - disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
}
#else /* !CONFIG_BLK_DEV_INITRD */
static unsigned long __init init_initrd(void)
{
return 0;
}
#define finalize_initrd() do {} while (0)
#endif
/*
* Initialize the bootmem allocator. It also sets up initrd-related data
* if needed.
*/
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
static void __init bootmem_init(void)
{
init_initrd();
finalize_initrd();
}
#else /* !CONFIG_SGI_IP27 */
static void __init bootmem_init(void)
{
phys_addr_t ramstart, ramend;
unsigned long start, end;
int i;
ramstart = memblock_start_of_DRAM();
ramend = memblock_end_of_DRAM();
/*
* Sanity check any INITRD first. We don't take it into account
* for bootmem setup initially, rely on the end-of-kernel-code
* as our memory range starting point. Once bootmem is inited we
* will reserve the area used for the initrd.
*/
init_initrd();
/* Reserve memory occupied by kernel. */
memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text));
/* max_low_pfn is not a number of pages but the end pfn of low mem */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
/*
* Reserve any memory between the start of RAM and PHYS_OFFSET
*/
if (ramstart > PHYS_OFFSET)
memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
}
#endif
min_low_pfn = ARCH_PFN_OFFSET;
max_pfn = PFN_DOWN(ramend);
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
/*
* Skip highmem here so we get an accurate max_low_pfn if low
* memory stops short of high memory.
* If the region overlaps HIGHMEM_START, end is clipped so
* max_pfn excludes the highmem portion.
*/
if (start >= PFN_DOWN(HIGHMEM_START))
continue;
if (end > PFN_DOWN(HIGHMEM_START))
end = PFN_DOWN(HIGHMEM_START);
if (end > max_low_pfn)
max_low_pfn = end;
}
if (min_low_pfn >= max_low_pfn)
panic("Incorrect memory mapping !!!");
if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
highstart_pfn = PFN_DOWN(HIGHMEM_START);
highend_pfn = max_pfn;
#else
max_low_pfn = PFN_DOWN(HIGHMEM_START);
max_pfn = max_low_pfn;
#endif
}
/*
* Reserve initrd memory if needed.
*/
finalize_initrd();
}
#endif /* CONFIG_SGI_IP27 */
static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
phys_addr_t start, size;
if (!p) {
pr_err("mem parameter is empty, do nothing\n");
return -EINVAL;
}
/*
* If a user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
memblock_remove(memblock_start_of_DRAM(),
memblock_end_of_DRAM() - memblock_start_of_DRAM());
}
start = 0;
size = memparse(p, &p);
if (*p == '@')
start = memparse(p + 1, &p);
if (IS_ENABLED(CONFIG_NUMA))
memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
else
memblock_add(start, size);
return 0;
}
early_param("mem", early_parse_mem);
static int __init early_parse_memmap(char *p)
{
char *oldp;
u64 start_at, mem_size;
if (!p)
return -EINVAL;
if (!strncmp(p, "exactmap", 8)) {
pr_err("\"memmap=exactmap\" invalid on MIPS\n");
return 0;
}
oldp = p;
mem_size = memparse(p, &p);
if (p == oldp)
return -EINVAL;
if (*p == '@') {
start_at = memparse(p+1, &p);
memblock_add(start_at, mem_size);
} else if (*p == '#') {
pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
return -EINVAL;
} else if (*p == '$') {
start_at = memparse(p+1, &p);
memblock_add(start_at, mem_size);
memblock_reserve(start_at, mem_size);
} else {
pr_err("\"memmap\" invalid format!\n");
return -EINVAL;
}
if (*p == '\0') {
usermem = 1;
return 0;
} else
return -EINVAL;
}
early_param("memmap", early_parse_memmap);
static void __init mips_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
phys_addr_t start, end;
u64 i;
if (!elfcorehdr_size) {
for_each_mem_range(i, &start, &end) {
if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
/*
* Reserve from the elf core header to the end of
* the memory segment, that should all be kdump
* reserved memory.
*/
elfcorehdr_size = end - elfcorehdr_addr;
break;
}
}
}
pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
}
#ifdef CONFIG_KEXEC
/* 64M alignment for crash kernel regions */
#define CRASH_ALIGN SZ_64M
#define CRASH_ADDR_MAX SZ_512M
static void __init mips_parse_crashkernel(void)
{
unsigned long long total_mem;
unsigned long long crash_size, crash_base;
int ret;
total_mem = memblock_phys_mem_size();
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base);
if (ret != 0 || crash_size <= 0)
return;
if (crash_base <= 0) {
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
CRASH_ALIGN,
CRASH_ADDR_MAX);
if (!crash_base) {
pr_warn("crashkernel reservation failed - No suitable area found.\n");
return;
}
} else {
unsigned long long start;
start = memblock_phys_alloc_range(crash_size, 1,
crash_base,
crash_base + crash_size);
if (start != crash_base) {
pr_warn("Invalid memory region reserved for crash kernel\n");
return;
}
}
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
}
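/*
 * For example, booting with "crashkernel=64M@256M" requests a 64MB
 * region at physical 256MB exactly; with a plain "crashkernel=64M"
 * (no base) the region is instead allocated wherever suitable below
 * CRASH_ADDR_MAX, at CRASH_ALIGN (64MB) alignment.
 */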
static void __init request_crashkernel(struct resource *res)
{
int ret;
if (crashk_res.start == crashk_res.end)
return;
ret = request_resource(res, &crashk_res);
if (!ret)
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
(unsigned long)(resource_size(&crashk_res) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}
static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
static void __init check_kernel_sections_mem(void)
{
phys_addr_t start = __pa_symbol(&_text);
phys_addr_t size = __pa_symbol(&_end) - start;
if (!memblock_is_region_memory(start, size)) {
pr_info("Kernel sections are not in the memory maps\n");
memblock_add(start, size);
}
}
static void __init bootcmdline_append(const char *s, size_t max)
{
if (!s[0] || !max)
return;
if (boot_command_line[0])
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, s, max);
}
#ifdef CONFIG_OF_EARLY_FLATTREE
static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
int depth, void *data)
{
bool *dt_bootargs = data;
const char *p;
int l;
if (depth != 1 || !data ||
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
return 0;
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0) {
bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
*dt_bootargs = true;
}
return 1;
}
#endif /* CONFIG_OF_EARLY_FLATTREE */
static void __init bootcmdline_init(void)
{
bool dt_bootargs = false;
/*
* If CMDLINE_OVERRIDE is enabled then initializing the command line is
* trivial - we simply use the built-in command line unconditionally &
* unmodified.
*/
if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
return;
}
/*
* If the user specified a built-in command line &
* MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
* prepended to arguments from the bootloader or DT so we'll copy them
* to the start of boot_command_line here. Otherwise, empty
* boot_command_line to undo anything early_init_dt_scan_chosen() did.
*/
if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
else
boot_command_line[0] = 0;
#ifdef CONFIG_OF_EARLY_FLATTREE
/*
* If we're configured to take boot arguments from DT, look for those
* now.
*/
if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif
/*
* If we didn't get any arguments from DT (regardless of whether that's
* because we weren't configured to look for them, or because we looked
* & found none) then we'll take arguments from the bootloader.
* plat_mem_setup() should have filled arcs_cmdline with arguments from
* the bootloader.
*/
if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
/*
* If the user specified a built-in command line & we didn't already
* prepend it, we append it to boot_command_line here.
*/
if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
!IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}
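/*
 * A hypothetical composition, for illustration only: with
 * MIPS_CMDLINE_BUILTIN_EXTEND enabled, a builtin_cmdline of
 * "console=ttyS0" and a bootloader arcs_cmdline of "root=/dev/sda1",
 * boot_command_line ends up as "console=ttyS0 root=/dev/sda1" -- the
 * built-in arguments first, then the bootloader's appended via
 * bootcmdline_append().
 */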
/*
* arch_mem_init - initialize memory management subsystem
*
* o plat_mem_setup() detects the memory configuration and will record detected
* memory areas using memblock_add.
*
* At this stage the memory configuration of the system is known to the
* kernel but generic memory management system is still entirely uninitialized.
*
* o bootmem_init()
* o sparse_init()
* o paging_init()
* o dma_contiguous_reserve()
*
* At this stage the bootmem allocator is ready to use.
*
* NOTE: historically plat_mem_setup did the entire platform initialization.
* This was rather impractical because it meant plat_mem_setup had to
* get away without any kind of memory allocator. To keep old code from
* breaking plat_setup was just renamed to plat_mem_setup and a second platform
* initialization hook for anything else was introduced.
*/
static void __init arch_mem_init(char **cmdline_p)
{
/* call board setup routine */
plat_mem_setup();
memblock_set_bottom_up(true);
bootcmdline_init();
strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");
check_kernel_sections_mem();
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
#ifndef CONFIG_NUMA
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
bootmem_init();
/*
* Prevent memblock from allocating high memory.
* This cannot be done before max_low_pfn is detected, so up
* to this point it is only possible to reserve physical memory
* with memblock_reserve; memblock_alloc* can be used
* only after this point.
*/
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
mips_reserve_vmcore();
mips_parse_crashkernel();
device_tree_init();
/*
* In order to reduce the possibility of kernel panic when failed to
* get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
* low memory as small as possible before plat_swiotlb_setup(), so
* make sparse_init() using top-down allocation.
*/
memblock_set_bottom_up(false);
sparse_init();
memblock_set_bottom_up(true);
plat_swiotlb_setup();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
/* Reserve for hibernation. */
memblock_reserve(__pa_symbol(&__nosave_begin),
__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}
static void __init resource_init(void)
{
phys_addr_t start, end;
u64 i;
if (UNCAC_BASE != IO_BASE)
return;
code_resource.start = __pa_symbol(&_text);
code_resource.end = __pa_symbol(&_etext) - 1;
data_resource.start = __pa_symbol(&_etext);
data_resource.end = __pa_symbol(&_edata) - 1;
bss_resource.start = __pa_symbol(&__bss_start);
bss_resource.end = __pa_symbol(&__bss_stop) - 1;
for_each_mem_range(i, &start, &end) {
struct resource *res;
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
if (!res)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(struct resource));
res->start = start;
/*
* In memblock, end points to the first byte after the
* range while in resources, end points to the last byte in
* the range.
*/
res->end = end - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
res->name = "System RAM";
request_resource(&iomem_resource, res);
/*
* We don't know which RAM region contains kernel data,
* so we try it repeatedly and let the resource manager
* test it.
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
request_crashkernel(res);
}
}
#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
int i, possible = num_possible_cpus();
if (possible > nr_cpu_ids)
possible = nr_cpu_ids;
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
for (; i < NR_CPUS; i++)
set_cpu_possible(i, false);
set_nr_cpu_ids(possible);
}
#else
static inline void prefill_possible_map(void) {}
#endif
static void __init setup_rng_seed(void)
{
char *rng_seed_hex = fw_getenv("rngseed");
u8 rng_seed[512];
size_t len;
if (!rng_seed_hex)
return;
len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
if (hex2bin(rng_seed, rng_seed_hex, len))
return;
add_bootloader_randomness(rng_seed, len);
memzero_explicit(rng_seed, len);
memzero_explicit(rng_seed_hex, len * 2);
}
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
mips_cm_probe();
prom_init();
setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
setup_early_printk();
#endif
cpu_report();
if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
check_bugs64_early();
#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
#endif
#endif
arch_mem_init(cmdline_p);
dmi_setup();
resource_init();
plat_smp_setup();
prefill_possible_map();
cpu_cache_init();
paging_init();
memblock_dump_all();
setup_rng_seed();
}
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
mips_debugfs_dir = debugfs_create_dir("mips", NULL);
return 0;
}
arch_initcall(debugfs_mips);
#endif
#ifdef CONFIG_DMA_NONCOHERENT
static int __init setcoherentio(char *str)
{
dma_default_coherent = true;
pr_info("Hardware DMA cache coherency (command line)\n");
return 0;
}
early_param("coherentio", setcoherentio);
static int __init setnocoherentio(char *str)
{
dma_default_coherent = false;
pr_info("Software DMA cache coherency (command line)\n");
return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif
void __init arch_cpu_finalize_init(void)
{
unsigned int cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
check_bugs32();
if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
check_bugs64();
}
| linux-master | arch/mips/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MIPS cacheinfo support
*/
#include <linux/cacheinfo.h>
/* Populates leaf and increments to next leaf */
#define populate_cache(cache, leaf, c_level, c_type) \
do { \
leaf->type = c_type; \
leaf->level = c_level; \
leaf->coherency_line_size = c->cache.linesz; \
leaf->number_of_sets = c->cache.sets; \
leaf->ways_of_associativity = c->cache.ways; \
leaf->size = c->cache.linesz * c->cache.sets * \
c->cache.ways; \
leaf++; \
} while (0)
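/*
 * The size reported is simply linesz * sets * ways; e.g. a
 * hypothetical 4-way D-cache with 32-byte lines and 256 sets shows up
 * as 32 * 256 * 4 = 32768 bytes (32K).
 */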
int init_cache_level(unsigned int cpu)
{
struct cpuinfo_mips *c = ¤t_cpu_data;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
int levels = 0, leaves = 0;
/*
* If Dcache is not set, we assume the cache structures
* are not properly initialized.
*/
if (c->dcache.waysize)
levels += 1;
else
return -ENOENT;
leaves += (c->icache.waysize) ? 2 : 1;
if (c->vcache.waysize) {
levels++;
leaves++;
}
if (c->scache.waysize) {
levels++;
leaves++;
}
if (c->tcache.waysize) {
levels++;
leaves++;
}
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
return 0;
}
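/*
 * Worked example, assuming a hypothetical CPU with split L1 I/D
 * caches and a unified L2 (scache) but no vcache/tcache: leaves
 * becomes 2 (I + D) + 1 (L2) = 3 and levels becomes 2, which is the
 * hierarchy later exposed through sysfs cacheinfo.
 */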
static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
{
int cpu1;
for_each_possible_cpu(cpu1)
if (cpus_are_siblings(cpu, cpu1))
cpumask_set_cpu(cpu1, cpu_map);
}
static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
{
int cpu1;
int cluster = cpu_cluster(&cpu_data[cpu]);
for_each_possible_cpu(cpu1)
if (cpu_cluster(&cpu_data[cpu1]) == cluster)
cpumask_set_cpu(cpu1, cpu_map);
}
int populate_cache_leaves(unsigned int cpu)
{
struct cpuinfo_mips *c = ¤t_cpu_data;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
int level = 1;
if (c->icache.waysize) {
/* I/D caches are per core */
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
level++;
} else {
populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
level++;
}
if (c->vcache.waysize) {
/* Vcache is per core as well */
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
level++;
}
if (c->scache.waysize) {
/* Scache is per cluster */
fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
level++;
}
if (c->tcache.waysize)
populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
this_cpu_ci->cpu_map_populated = true;
return 0;
}
| linux-master | arch/mips/kernel/cacheinfo.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2013 Imagination Technologies Ltd.
*
* VPE support module for loading a MIPS SP program into VPE1. The SP
* environment is rather simple since there are no TLBs. It needs
* to be relocatable (or partially linked). Initialize your stack in
* the startup-code. The loader looks for the symbol __start and sets
* up the execution to resume from there. To load and run, simply do
* a cat SP 'binary' to the /dev/vpe1 device.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/memblock.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <linux/atomic.h>
#include <asm/mips_mt.h>
#include <asm/processor.h>
#include <asm/vpe.h>
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
struct vpe_control vpecontrol = {
.vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
.vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
.tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
.tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
/* get the vpe associated with this minor */
struct vpe *get_vpe(int minor)
{
struct vpe *res, *v;
if (!cpu_has_mipsmt)
return NULL;
res = NULL;
spin_lock(&vpecontrol.vpe_list_lock);
list_for_each_entry(v, &vpecontrol.vpe_list, list) {
if (v->minor == VPE_MODULE_MINOR) {
res = v;
break;
}
}
spin_unlock(&vpecontrol.vpe_list_lock);
return res;
}
/* get the tc associated with this index */
struct tc *get_tc(int index)
{
struct tc *res, *t;
res = NULL;
spin_lock(&vpecontrol.tc_list_lock);
list_for_each_entry(t, &vpecontrol.tc_list, list) {
if (t->index == index) {
res = t;
break;
}
}
spin_unlock(&vpecontrol.tc_list_lock);
return res;
}
/* allocate a vpe and associate it with this minor (or index) */
struct vpe *alloc_vpe(int minor)
{
struct vpe *v;
v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
if (v == NULL)
goto out;
INIT_LIST_HEAD(&v->tc);
spin_lock(&vpecontrol.vpe_list_lock);
list_add_tail(&v->list, &vpecontrol.vpe_list);
spin_unlock(&vpecontrol.vpe_list_lock);
INIT_LIST_HEAD(&v->notify);
v->minor = VPE_MODULE_MINOR;
out:
return v;
}
/* allocate a tc. At startup only tc0 is running, all others can be halted. */
struct tc *alloc_tc(int index)
{
struct tc *tc;
tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
if (tc == NULL)
goto out;
INIT_LIST_HEAD(&tc->tc);
tc->index = index;
spin_lock(&vpecontrol.tc_list_lock);
list_add_tail(&tc->list, &vpecontrol.tc_list);
spin_unlock(&vpecontrol.tc_list_lock);
out:
return tc;
}
/* clean up and free everything */
void release_vpe(struct vpe *v)
{
list_del(&v->list);
if (v->load_addr)
release_progmem(v->load_addr);
kfree(v);
}
/* Find some VPE program space */
void *alloc_progmem(unsigned long len)
{
void *addr;
#ifdef CONFIG_MIPS_VPE_LOADER_TOM
/*
* This means you must tell Linux to use less memory than you
* physically have, for example by passing a mem= boot argument.
*/
addr = pfn_to_kaddr(max_low_pfn);
memset(addr, 0, len);
#else
/* simply grab some memory for now */
addr = kzalloc(len, GFP_KERNEL);
#endif
return addr;
}
void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
kfree(ptr);
#endif
}
/* Update size with this section: return offset. */
static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
{
long ret;
ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
*size = ret + sechdr->sh_size;
return ret;
}
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
might -- code, read-only data, read-write data, small data. Tally
sizes, and place the offsets into sh_entsize fields: high bit means it
belongs in init. */
static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
Elf_Shdr *sechdrs, const char *secstrings)
{
static unsigned long const masks[][2] = {
/* NOTE: all executable code must be the first section
* in this array; otherwise modify the text_size
* finder in the two loops below */
{SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
{SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
{SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
{ARCH_SHF_SMALL | SHF_ALLOC, 0}
};
unsigned int m, i;
for (i = 0; i < hdr->e_shnum; i++)
sechdrs[i].sh_entsize = ~0UL;
for (m = 0; m < ARRAY_SIZE(masks); ++m) {
for (i = 0; i < hdr->e_shnum; ++i) {
Elf_Shdr *s = &sechdrs[i];
struct module_memory *mod_mem;
mod_mem = &mod->mem[MOD_TEXT];
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL)
continue;
s->sh_entsize =
get_offset((unsigned long *)&mod_mem->size, s);
}
}
}
/* from module-elf32.c, but subverted a little */
struct mips_hi16 {
struct mips_hi16 *next;
Elf32_Addr *addr;
Elf32_Addr value;
};
static struct mips_hi16 *mips_hi16_list;
static unsigned int gp_offs, gp_addr;
static int apply_r_mips_none(struct module *me, uint32_t *location,
Elf32_Addr v)
{
return 0;
}
static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
Elf32_Addr v)
{
int rel;
if (!(*location & 0xffff)) {
rel = (int)v - gp_addr;
} else {
/* .sbss + gp(relative) + offset */
/* kludge! */
rel = (int)(short)((int)v + gp_offs +
(int)(short)(*location & 0xffff) - gp_addr);
}
if ((rel > 32768) || (rel < -32768)) {
pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
rel);
return -ENOEXEC;
}
*location = (*location & 0xffff0000) | (rel & 0xffff);
return 0;
}
static int apply_r_mips_pc16(struct module *me, uint32_t *location,
Elf32_Addr v)
{
int rel;
rel = (((unsigned int)v - (unsigned int)location));
rel >>= 2; /* because the offset is in _instructions_ not bytes. */
rel -= 1; /* and one instruction less due to the branch delay slot. */
if ((rel > 32768) || (rel < -32768)) {
pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
rel);
return -ENOEXEC;
}
*location = (*location & 0xffff0000) | (rel & 0xffff);
return 0;
}
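/*
 * Worked example of the PC16 arithmetic, with invented addresses: a
 * branch at location 0x1000 targeting v = 0x1010 yields
 * rel = ((0x1010 - 0x1000) >> 2) - 1 = 3, i.e. three instructions
 * past the delay slot, matching the hardware's target calculation of
 * PC + 4 + (offset << 2).
 */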
static int apply_r_mips_32(struct module *me, uint32_t *location,
Elf32_Addr v)
{
*location += v;
return 0;
}
static int apply_r_mips_26(struct module *me, uint32_t *location,
Elf32_Addr v)
{
if (v % 4) {
pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
return -ENOEXEC;
}
/*
* Not desperately convinced this is a good check of an overflow condition
* anyway. But it gets in the way of handling undefined weak symbols which
* we want to set to zero.
* if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
* printk(KERN_ERR
* "module %s: relocation overflow\n",
* me->name);
* return -ENOEXEC;
* }
*/
*location = (*location & ~0x03ffffff) |
((*location + (v >> 2)) & 0x03ffffff);
return 0;
}
static int apply_r_mips_hi16(struct module *me, uint32_t *location,
Elf32_Addr v)
{
struct mips_hi16 *n;
/*
* We cannot relocate this one now because we don't know the value of
* the carry we need to add. Save the information, and let LO16 do the
* actual relocation.
*/
n = kmalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return -ENOMEM;
n->addr = location;
n->value = v;
n->next = mips_hi16_list;
mips_hi16_list = n;
return 0;
}
static int apply_r_mips_lo16(struct module *me, uint32_t *location,
Elf32_Addr v)
{
unsigned long insnlo = *location;
Elf32_Addr val, vallo;
struct mips_hi16 *l, *next;
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (mips_hi16_list != NULL) {
l = mips_hi16_list;
while (l != NULL) {
unsigned long insn;
/*
* The value for the HI16 had best be the same.
*/
if (v != l->value) {
pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
goto out_free;
}
/*
* Do the HI16 relocation. Note that we actually don't
* need to know anything about the LO16 itself, except
* where to find the low 16 bits of the addend needed
* by the LO16.
*/
insn = *l->addr;
val = ((insn & 0xffff) << 16) + vallo;
val += v;
/*
* Account for the sign extension that will happen in
* the low bits.
*/
val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
insn = (insn & ~0xffff) | val;
*l->addr = insn;
next = l->next;
kfree(l);
l = next;
}
mips_hi16_list = NULL;
}
/*
* Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
*location = insnlo;
return 0;
out_free:
while (l != NULL) {
next = l->next;
kfree(l);
l = next;
}
mips_hi16_list = NULL;
return -ENOEXEC;
}
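/*
 * Worked example of the carry handling above, with made-up numbers:
 * for a final address of 0x12348000 the LO16 half is 0x8000, which
 * addiu sign-extends to 0xffff8000.  The HI16 half must therefore be
 * rounded up to 0x1235 so that (0x1235 << 16) + (short)0x8000 ==
 * 0x12348000 -- which is exactly what the
 * "+ ((val & 0x8000) != 0)" term does.
 */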
static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
Elf32_Addr v) = {
[R_MIPS_NONE] = apply_r_mips_none,
[R_MIPS_32] = apply_r_mips_32,
[R_MIPS_26] = apply_r_mips_26,
[R_MIPS_HI16] = apply_r_mips_hi16,
[R_MIPS_LO16] = apply_r_mips_lo16,
[R_MIPS_GPREL16] = apply_r_mips_gprel16,
[R_MIPS_PC16] = apply_r_mips_pc16
};
static char *rstrs[] = {
[R_MIPS_NONE] = "MIPS_NONE",
[R_MIPS_32] = "MIPS_32",
[R_MIPS_26] = "MIPS_26",
[R_MIPS_HI16] = "MIPS_HI16",
[R_MIPS_LO16] = "MIPS_LO16",
[R_MIPS_GPREL16] = "MIPS_GPREL16",
[R_MIPS_PC16] = "MIPS_PC16"
};
static int apply_relocations(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
unsigned int i;
Elf32_Addr v;
int res;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
Elf32_Word r_info = rel[i].r_info;
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(r_info);
if (!sym->st_value) {
pr_debug("%s: undefined weak symbol %s\n",
me->name, strtab + sym->st_name);
/* just print the warning, don't barf */
}
v = sym->st_value;
res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
if (res) {
char *r = rstrs[ELF32_R_TYPE(r_info)];
pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
rel[i].r_offset, r ? r : "UNKNOWN",
strtab + sym->st_name);
return res;
}
}
return 0;
}
static inline void save_gp_address(unsigned int secbase, unsigned int rel)
{
gp_addr = secbase + rel;
gp_offs = gp_addr - (secbase & 0xffff0000);
}
/* end module-elf32.c */
/* Change all symbols so that st_value encodes the pointer directly. */
static void simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
const char *strtab,
const char *secstrings,
unsigned int nsecs, struct module *mod)
{
Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
unsigned long secbase, bssbase = 0;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
int size;
/* find the .bss section for COMMON symbols */
for (i = 0; i < nsecs; i++) {
if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
bssbase = sechdrs[i].sh_addr;
break;
}
}
for (i = 1; i < n; i++) {
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* Allocate space for the symbol in the .bss section.
st_value is currently size.
We want it to have the address of the symbol. */
size = sym[i].st_value;
sym[i].st_value = bssbase;
bssbase += size;
break;
case SHN_ABS:
/* Don't need to do anything */
break;
case SHN_UNDEF:
/* ret = -ENOENT; */
break;
case SHN_MIPS_SCOMMON:
pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
strtab + sym[i].st_name, sym[i].st_shndx);
/* .sbss section */
break;
default:
secbase = sechdrs[sym[i].st_shndx].sh_addr;
if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
save_gp_address(secbase, sym[i].st_value);
sym[i].st_value += secbase;
break;
}
}
}
#ifdef DEBUG_ELFLOADER
static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
const char *strtab, struct module *mod)
{
Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
pr_debug("dump_elfsymbols: n %d\n", n);
for (i = 1; i < n; i++) {
pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
sym[i].st_value);
}
}
#endif
static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
unsigned int symindex, const char *strtab,
struct module *mod)
{
Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
for (i = 1; i < n; i++) {
if (strcmp(strtab + sym[i].st_name, "__start") == 0)
v->__start = sym[i].st_value;
if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
v->shared_ptr = (void *)sym[i].st_value;
}
if ((v->__start == 0) || (v->shared_ptr == NULL))
return -1;
return 0;
}
/*
* Allocates a VPE with some program code space (the load address), copies the
* contents of the program (p)buffer performing relocations/etc, and frees it
* when finished.
*/
static int vpe_elfload(struct vpe *v)
{
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
long err = 0;
char *secstrings, *strtab = NULL;
unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
struct module mod; /* so we can re-use the relocations code */
memset(&mod, 0, sizeof(struct module));
strcpy(mod.name, "VPE loader");
hdr = (Elf_Ehdr *) v->pbuffer;
len = v->plen;
/* Sanity checks against insmoding binaries or wrong arch,
weird elf version */
if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
|| (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
|| !elf_check_arch(hdr)
|| hdr->e_shentsize != sizeof(*sechdrs)) {
pr_warn("VPE loader: program wrong arch or weird elf version\n");
return -ENOEXEC;
}
if (hdr->e_type == ET_REL)
relocate = 1;
if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
pr_err("VPE loader: program length %u truncated\n", len);
return -ENOEXEC;
}
/* Convenience variables */
sechdrs = (void *)hdr + hdr->e_shoff;
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
sechdrs[0].sh_addr = 0;
/* And these should exist, but gcc whinges if we don't init them */
symindex = strindex = 0;
if (relocate) {
for (i = 1; i < hdr->e_shnum; i++) {
if ((sechdrs[i].sh_type != SHT_NOBITS) &&
(len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
pr_err("VPE program length %u truncated\n",
len);
return -ENOEXEC;
}
/* Mark all sections sh_addr with their address in the
temporary image. */
sechdrs[i].sh_addr = (size_t) hdr +
sechdrs[i].sh_offset;
/* Internal symbols and strings. */
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
strindex = sechdrs[i].sh_link;
strtab = (char *)hdr +
sechdrs[strindex].sh_offset;
}
}
layout_sections(&mod, hdr, sechdrs, secstrings);
}
v->load_addr = alloc_progmem(mod.mem[MOD_TEXT].size);
if (!v->load_addr)
return -ENOMEM;
pr_info("VPE loader: loading to %p\n", v->load_addr);
if (relocate) {
for (i = 0; i < hdr->e_shnum; i++) {
void *dest;
if (!(sechdrs[i].sh_flags & SHF_ALLOC))
continue;
dest = v->load_addr + sechdrs[i].sh_entsize;
if (sechdrs[i].sh_type != SHT_NOBITS)
memcpy(dest, (void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size);
/* Update sh_addr to point to copy in image. */
sechdrs[i].sh_addr = (unsigned long)dest;
pr_debug(" section sh_name %s sh_addr 0x%x\n",
secstrings + sechdrs[i].sh_name,
sechdrs[i].sh_addr);
}
/* Fix up syms, so that st_value is a pointer to location. */
simplify_symbols(sechdrs, symindex, strtab, secstrings,
hdr->e_shnum, &mod);
/* Now do relocations. */
for (i = 1; i < hdr->e_shnum; i++) {
const char *strtab = (char *)sechdrs[strindex].sh_addr;
unsigned int info = sechdrs[i].sh_info;
/* Not a valid relocation section? */
if (info >= hdr->e_shnum)
continue;
/* Don't bother with non-allocated sections */
if (!(sechdrs[info].sh_flags & SHF_ALLOC))
continue;
if (sechdrs[i].sh_type == SHT_REL)
err = apply_relocations(sechdrs, strtab,
symindex, i, &mod);
else if (sechdrs[i].sh_type == SHT_RELA)
err = apply_relocate_add(sechdrs, strtab,
symindex, i, &mod);
if (err < 0)
return err;
}
} else {
struct elf_phdr *phdr = (struct elf_phdr *)
((char *)hdr + hdr->e_phoff);
for (i = 0; i < hdr->e_phnum; i++) {
if (phdr->p_type == PT_LOAD) {
memcpy((void *)phdr->p_paddr,
(char *)hdr + phdr->p_offset,
phdr->p_filesz);
memset((void *)phdr->p_paddr + phdr->p_filesz,
0, phdr->p_memsz - phdr->p_filesz);
}
phdr++;
}
for (i = 0; i < hdr->e_shnum; i++) {
/* Internal symbols and strings. */
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
strindex = sechdrs[i].sh_link;
strtab = (char *)hdr +
sechdrs[strindex].sh_offset;
/*
* mark symtab's address for when we try
* to find the magic symbols
*/
sechdrs[i].sh_addr = (size_t) hdr +
sechdrs[i].sh_offset;
}
}
}
/* make sure it's physically written out */
flush_icache_range((unsigned long)v->load_addr,
(unsigned long)v->load_addr + v->len);
if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
if (v->__start == 0) {
pr_warn("VPE loader: program does not contain a __start symbol\n");
return -ENOEXEC;
}
if (v->shared_ptr == NULL)
pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
" Unable to use AMVP (AP/SP) facilities.\n");
}
pr_info(" elf loaded\n");
return 0;
}
/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
enum vpe_state state;
struct vpe_notifications *notifier;
struct vpe *v;
if (VPE_MODULE_MINOR != iminor(inode)) {
/* assume only 1 device at the moment. */
pr_warn("VPE loader: only vpe1 is supported\n");
return -ENODEV;
}
v = get_vpe(aprp_cpu_index());
if (v == NULL) {
pr_warn("VPE loader: unable to get vpe\n");
return -ENODEV;
}
state = xchg(&v->state, VPE_STATE_INUSE);
if (state != VPE_STATE_UNUSED) {
pr_debug("VPE loader: tc in use dumping regs\n");
list_for_each_entry(notifier, &v->notify, list)
notifier->stop(aprp_cpu_index());
release_progmem(v->load_addr);
cleanup_tc(get_tc(aprp_cpu_index()));
}
/* this of course trashes what was there before... */
v->pbuffer = vmalloc(P_SIZE);
if (!v->pbuffer) {
pr_warn("VPE loader: unable to allocate memory\n");
return -ENOMEM;
}
v->plen = P_SIZE;
v->load_addr = NULL;
v->len = 0;
v->shared_ptr = NULL;
v->__start = 0;
return 0;
}
static int vpe_release(struct inode *inode, struct file *filp)
{
#ifdef CONFIG_MIPS_VPE_LOADER_MT
struct vpe *v;
Elf_Ehdr *hdr;
int ret = 0;
v = get_vpe(aprp_cpu_index());
if (v == NULL)
return -ENODEV;
hdr = (Elf_Ehdr *) v->pbuffer;
if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
if (vpe_elfload(v) >= 0) {
vpe_run(v);
} else {
pr_warn("VPE loader: ELF load failed.\n");
ret = -ENOEXEC;
}
} else {
pr_warn("VPE loader: only elf files are supported\n");
ret = -ENOEXEC;
}
	/* It's useful to be able to run the SP and, if it chokes, have a look
	   at /dev/rt?. But if we reset the pointer to the shared struct we
	   lose what has happened. So if garbage is sent to the vpe device,
	   use it as a trigger for the reset. Hopefully a valid executable
	   will be along shortly. */
if (ret < 0)
v->shared_ptr = NULL;
vfree(v->pbuffer);
v->plen = 0;
return ret;
#else
pr_warn("VPE loader: ELF load failed.\n");
return -ENOEXEC;
#endif
}
static ssize_t vpe_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
size_t ret = count;
struct vpe *v;
if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
return -ENODEV;
v = get_vpe(aprp_cpu_index());
if (v == NULL)
return -ENODEV;
if ((count + v->len) > v->plen) {
pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
return -ENOMEM;
}
count -= copy_from_user(v->pbuffer + v->len, buffer, count);
if (!count)
return -EFAULT;
v->len += count;
return ret;
}
const struct file_operations vpe_fops = {
.owner = THIS_MODULE,
.open = vpe_open,
.release = vpe_release,
.write = vpe_write,
.llseek = noop_llseek,
};
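/*
 * Illustrative sketch (not part of this file): minimal userland use of
 * the loader.  The program ELF is written to the VPE device node and
 * close() triggers vpe_release(), which validates the ELF, loads it and
 * starts the VPE.  The device path below is an assumption; it depends on
 * how the character device is named on a given system.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int load_sp_program(const char *elf_path)
{
	char buf[4096];
	ssize_t n;
	int vpe = open("/dev/vpe1", O_WRONLY);	/* hypothetical node name */
	int elf = open(elf_path, O_RDONLY);

	if (vpe < 0 || elf < 0)
		return -1;
	while ((n = read(elf, buf, sizeof(buf))) > 0)
		if (write(vpe, buf, n) != n)
			break;
	close(elf);
	return close(vpe);	/* vpe_release() loads and runs the image */
}
#endif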
void *vpe_get_shared(int index)
{
struct vpe *v = get_vpe(index);
if (v == NULL)
return NULL;
return v->shared_ptr;
}
EXPORT_SYMBOL(vpe_get_shared);
int vpe_notify(int index, struct vpe_notifications *notify)
{
struct vpe *v = get_vpe(index);
if (v == NULL)
return -1;
	list_add(&notify->list, &v->notify);
return 0;
}
EXPORT_SYMBOL(vpe_notify);
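/*
 * Illustrative sketch (not part of this file): a kernel-side user of the
 * notification hook.  Only the vpe_notify() call and the ->stop() member
 * of struct vpe_notifications are taken from this file; the callback
 * body is made up.  ->stop() is invoked before a busy VPE is reclaimed
 * by a new vpe_open().
 */
#if 0
static void my_sp_stop(int index)
{
	pr_info("SP program on VPE %d is being stopped\n", index);
}

static struct vpe_notifications my_notify = {
	.stop = my_sp_stop,
};

static int __init my_init(void)
{
	return vpe_notify(aprp_cpu_index(), &my_notify);
}
#endif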
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");
| linux-master | arch/mips/kernel/vpe.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
* Elizabeth Clarke ([email protected])
* Ralf Baechle ([email protected])
* Copyright (C) 2006 Ralf Baechle ([email protected])
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/mips-cps.h>
static void __init smvp_copy_vpe_config(void)
{
write_vpe_c0_status(
(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
/* set config to be the same as vpe0, particularly kseg0 coherency alg */
	write_vpe_c0_config(read_c0_config());
/* make sure there are no software interrupts pending */
write_vpe_c0_cause(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
write_vpe_c0_count(read_c0_count());
}
static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
unsigned int ncpu)
{
if (tc >= smp_max_threads ||
(tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)))
return ncpu;
/* Deactivate all but VPE 0 */
if (tc != 0) {
unsigned long tmp = read_vpe_c0_vpeconf0();
tmp &= ~VPECONF0_VPA;
/* master VPE */
tmp |= VPECONF0_MVP;
write_vpe_c0_vpeconf0(tmp);
/* Record this as available CPU */
set_cpu_possible(tc, true);
set_cpu_present(tc, true);
__cpu_number_map[tc] = ++ncpu;
__cpu_logical_map[ncpu] = tc;
}
	/* Disable multi-threading with TCs */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
if (tc != 0)
smvp_copy_vpe_config();
cpu_set_vpe_id(&cpu_data[ncpu], tc);
return ncpu;
}
static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
unsigned long tmp;
if (!tc)
return;
	/* Bind a TC to each VPE. May as well put all excess TCs
	   on the last VPE. */
if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
else {
write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
/* and set XTC */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
}
tmp = read_tc_c0_tcstatus();
/* mark not allocated and not dynamically allocatable */
tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
tmp |= TCSTATUS_IXMT; /* interrupt exempt */
write_tc_c0_tcstatus(tmp);
write_tc_c0_tchalt(TCHALT_H);
}
static void vsmp_init_secondary(void)
{
	/* This is Malta specific: IPI, performance and timer interrupts */
if (mips_gic_present())
change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
STATUSF_IP4 | STATUSF_IP5 |
STATUSF_IP6 | STATUSF_IP7);
else
change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
STATUSF_IP6 | STATUSF_IP7);
}
static void vsmp_smp_finish(void)
{
/* CDFIXME: remove this? */
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
* smp_bootstrap is the place to resume from
* __KSTK_TOS(idle) is apparently the stack pointer
* (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE
*/
static int vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(cpu);
/* restart */
write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
/* enable the tc this vpe/cpu will be running */
write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
write_tc_c0_tchalt(0);
/* enable the VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
/* stack pointer */
	write_tc_gpr_sp(__KSTK_TOS(idle));
/* global pointer */
write_tc_gpr_gp((unsigned long)gp);
flush_icache_range((unsigned long)gp,
(unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(EVPE_ENABLE);
return 0;
}
/*
* Common setup before any secondaries are started
* Make sure all CPU's are in a sensible state before we boot any of the
* secondaries
*/
static void __init vsmp_smp_setup(void)
{
unsigned int mvpconf0, ntc, tc, ncpu = 0;
unsigned int nvpe;
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (!cpu_has_mipsmt)
return;
/* disable MT so we can configure */
dvpe();
dmt();
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
mvpconf0 = read_c0_mvpconf0();
ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
smp_num_siblings = nvpe;
	/* we'll always have more TCs than VPEs, so loop setting everything
	   to a sensible state */
for (tc = 0; tc <= ntc; tc++) {
settc(tc);
smvp_tc_init(tc, mvpconf0);
ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
}
/* Release config state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/* We'll wait until starting the secondaries before starting MVPE */
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}
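/*
 * Worked example of the MVPConf0 decode above (numbers are illustrative):
 * a core reporting MVPConf0.PTC = 1 and MVPConf0.PVPE = 1 has 2 TCs and
 * 2 VPEs, so the loop runs for tc = 0 and tc = 1, binding one TC to each
 * VPE and registering TC 1 as the second logical CPU.
 */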
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
mips_mt_set_cpuoptions();
}
const struct plat_smp_ops vsmp_smp_ops = {
.send_ipi_single = mips_smp_send_ipi_single,
.send_ipi_mask = mips_smp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
.smp_finish = vsmp_smp_finish,
.boot_secondary = vsmp_boot_secondary,
.smp_setup = vsmp_smp_setup,
.prepare_cpus = vsmp_prepare_cpus,
};
| linux-master | arch/mips/kernel/smp-mt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Imagination Technologies Ltd.
*
* CPU PM notifiers for saving/restoring general CPU state.
*/
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/pm.h>
#include <asm/watch.h>
/* Used by PM helper macros in asm/pm.h */
struct mips_static_suspend_state mips_static_suspend_state;
/**
* mips_cpu_save() - Save general CPU state.
* Ensures that general CPU context is saved, notably FPU and DSP.
*/
static int mips_cpu_save(void)
{
/* Save FPU state */
lose_fpu(1);
/* Save DSP state */
save_dsp(current);
return 0;
}
/**
* mips_cpu_restore() - Restore general CPU state.
* Restores important CPU context.
*/
static void mips_cpu_restore(void)
{
unsigned int cpu = smp_processor_id();
/* Restore ASID */
if (current->mm)
write_c0_entryhi(cpu_asid(cpu, current->mm));
/* Restore DSP state */
restore_dsp(current);
/* Restore UserLocal */
if (cpu_has_userlocal)
write_c0_userlocal(current_thread_info()->tp_value);
/* Restore watch registers */
__restore_watch(current);
}
/**
* mips_pm_notifier() - Notifier for preserving general CPU context.
* @self: Notifier block.
* @cmd: CPU PM event.
* @v: Private data (unused).
*
* This is called when a CPU power management event occurs, and is used to
* ensure that important CPU context is preserved across a CPU power down.
*/
static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
int ret;
switch (cmd) {
case CPU_PM_ENTER:
ret = mips_cpu_save();
if (ret)
return NOTIFY_STOP;
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
mips_cpu_restore();
break;
}
return NOTIFY_OK;
}
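/*
 * Illustrative sketch (not part of this file): how platform idle or
 * suspend code drives the notifier above.  cpu_pm_enter() delivers
 * CPU_PM_ENTER (saving FPU/DSP state via mips_cpu_save()) and
 * cpu_pm_exit() delivers CPU_PM_EXIT (restoring it via
 * mips_cpu_restore()).  The function below is made up.
 */
#if 0
static void my_platform_power_down(void)
{
	if (cpu_pm_enter())
		return;		/* a notifier vetoed the power-down */
	/* ... enter the low-power state, losing CPU context ... */
	cpu_pm_exit();
}
#endif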
static struct notifier_block mips_pm_notifier_block = {
.notifier_call = mips_pm_notifier,
};
static int __init mips_pm_init(void)
{
return cpu_pm_register_notifier(&mips_pm_notifier_block);
}
arch_initcall(mips_pm_init);
| linux-master | arch/mips/kernel/pm.c |
/*
* Handle unaligned accesses by emulation.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2014 Imagination Technologies Ltd.
*
* This file contains exception handler for address error exception with the
* special capability to execute faulting instructions in software. The
* handler does not try to handle the case when the program counter points
* to an address not aligned to a word boundary.
*
 * Putting data at unaligned addresses is bad practice even on Intel, where
 * only performance suffers. Much worse is that such code is non-portable.
 * Because several programs die on MIPS due to alignment problems, I decided
 * to implement this handler anyway, though I originally didn't intend to do
 * this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I intend, however, to disable this sometime in the future, once the
 * alignment problems with user programs have been fixed. For programmers
 * this is the right way to go.
*
* Fixing address errors is a per process option. The option is inherited
* across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
* #include <sys/sysmips.h>
*
* ...
* sysmips(MIPS_FIXADE, x);
* ...
*
 * The argument x is 0 to disable the software emulation; any other value
 * enables it.
*
* Below a little program to play around with this feature.
*
* #include <stdio.h>
* #include <sys/sysmips.h>
*
* struct foo {
* unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
* struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
* unsigned int *p = (unsigned int *) (x.bar + 3);
* int i;
*
* if (argc > 1)
* sysmips(MIPS_FIXADE, atoi(argv[1]));
*
* printf("*p = %08lx\n", *p);
*
* *p = 0xdeadface;
*
* for(i = 0; i <= 7; i++)
* printf("%02x ", x.bar[i]);
* printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
* exception for the R6000.
* A store crossing a page boundary might be executed only partially.
* Undo the partial store in this case.
*/
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/unaligned-emul.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include "access-helper.h"
enum {
UNALIGNED_ACTION_QUIET,
UNALIGNED_ACTION_SIGNAL,
UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int *pc)
{
unsigned long origpc, orig31, value;
union mips_instruction insn;
unsigned int res;
bool user = user_mode(regs);
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/*
* This load never faults.
*/
__get_inst32(&insn.word, pc, user);
switch (insn.i_format.opcode) {
/*
	 * These are instructions that a compiler doesn't generate. We
	 * can therefore assume that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
*/
case ll_op:
case lld_op:
case sc_op:
case scd_op:
/*
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
*/
case ldl_op:
case ldr_op:
case lwl_op:
case lwr_op:
case sdl_op:
case sdr_op:
case swl_op:
case swr_op:
case lb_op:
case lbu_op:
case sb_op:
goto sigbus;
/*
* The remaining opcodes are the ones that are really of
* interest.
*/
#ifdef CONFIG_MACH_INGENIC
case spec2_op:
if (insn.mxu_lx_format.func != mxu_lx_op)
			goto sigbus; /* we don't care about other MXU instructions */
switch (insn.mxu_lx_format.op) {
case mxu_lxw_op:
if (user && !access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.mxu_lx_format.rd] = value;
break;
case mxu_lxh_op:
if (user && !access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
break;
case mxu_lxhu_op:
if (user && !access_ok(addr, 2))
goto sigbus;
LoadHWU(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
break;
case mxu_lxb_op:
case mxu_lxbu_op:
goto sigbus;
default:
goto sigill;
}
break;
#endif
case spec3_op:
if (insn.dsp_format.func == lx_op) {
switch (insn.dsp_format.op) {
case lwx_op:
if (user && !access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
break;
case lhx_op:
if (user && !access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.dsp_format.rd] = value;
break;
default:
goto sigill;
}
}
#ifdef CONFIG_EVA
else {
/*
		 * We can land here only from the kernel accessing user
		 * memory, so we need to "switch" the address limit to
		 * user space so that the address checks work properly.
*/
switch (insn.spec3_format.func) {
case lhe_op:
if (!access_ok(addr, 2))
goto sigbus;
LoadHWE(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
break;
case lwe_op:
if (!access_ok(addr, 4))
goto sigbus;
LoadWE(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
break;
case lhue_op:
if (!access_ok(addr, 2))
goto sigbus;
LoadHWUE(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
break;
case she_op:
if (!access_ok(addr, 2))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreHWE(addr, value, res);
if (res)
goto fault;
break;
case swe_op:
if (!access_ok(addr, 4))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreWE(addr, value, res);
if (res)
goto fault;
break;
default:
goto sigill;
}
}
#endif
break;
case lh_op:
if (user && !access_ok(addr, 2))
goto sigbus;
if (IS_ENABLED(CONFIG_EVA) && user)
LoadHWE(addr, value, res);
else
LoadHW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lw_op:
if (user && !access_ok(addr, 4))
goto sigbus;
if (IS_ENABLED(CONFIG_EVA) && user)
LoadWE(addr, value, res);
else
LoadW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lhu_op:
if (user && !access_ok(addr, 2))
goto sigbus;
if (IS_ENABLED(CONFIG_EVA) && user)
LoadHWUE(addr, value, res);
else
LoadHWU(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lwu_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 4))
goto sigbus;
LoadWU(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case ld_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 8))
goto sigbus;
LoadDW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case sh_op:
if (user && !access_ok(addr, 2))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA) && user)
StoreHWE(addr, value, res);
else
StoreHW(addr, value, res);
if (res)
goto fault;
break;
case sw_op:
if (user && !access_ok(addr, 4))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA) && user)
StoreWE(addr, value, res);
else
StoreW(addr, value, res);
if (res)
goto fault;
break;
case sd_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 8))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
StoreDW(addr, value, res);
if (res)
goto fault;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
#ifdef CONFIG_MIPS_FP_SUPPORT
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
case cop1x_op: {
void __user *fault_addr = NULL;
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
own_fpu(1); /* Restore FPU state. */
/* Signal if something went wrong. */
process_fpemu_return(res, fault_addr, 0);
if (res == 0)
break;
return;
}
#endif /* CONFIG_MIPS_FP_SUPPORT */
#ifdef CONFIG_CPU_HAS_MSA
case msa_op: {
unsigned int wd, preempted;
enum msa_2b_fmt df;
union fpureg *fpr;
if (!cpu_has_msa)
goto sigill;
/*
* If we've reached this point then userland should have taken
* the MSA disabled exception & initialised vector context at
* some point in the past.
*/
BUG_ON(!thread_msa_context_live());
df = insn.msa_mi10_format.df;
wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];
switch (insn.msa_mi10_format.func) {
case msa_ld_op:
if (!access_ok(addr, sizeof(*fpr)))
goto sigbus;
do {
/*
* If we have live MSA context keep track of
* whether we get preempted in order to avoid
* the register context we load being clobbered
* by the live context as it's saved during
* preemption. If we don't have live context
* then it can't be saved to clobber the value
* we load.
*/
preempted = test_thread_flag(TIF_USEDMSA);
res = __copy_from_user_inatomic(fpr, addr,
sizeof(*fpr));
if (res)
goto fault;
/*
* Update the hardware register if it is in use
* by the task in this quantum, in order to
* avoid having to save & restore the whole
* vector context.
*/
preempt_disable();
if (test_thread_flag(TIF_USEDMSA)) {
write_msa_wr(wd, fpr, df);
preempted = 0;
}
preempt_enable();
} while (preempted);
break;
case msa_st_op:
if (!access_ok(addr, sizeof(*fpr)))
goto sigbus;
/*
* Update from the hardware register if it is in use by
* the task in this quantum, in order to avoid having to
* save & restore the whole vector context.
*/
preempt_disable();
if (test_thread_flag(TIF_USEDMSA))
read_msa_wr(wd, fpr, df);
preempt_enable();
res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
if (res)
goto fault;
break;
default:
goto sigbus;
}
compute_return_epc(regs);
break;
}
#endif /* CONFIG_CPU_HAS_MSA */
#ifndef CONFIG_CPU_MIPSR6
/*
* COP2 is available to implementor for application specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possible sending of signals.
*
* This instruction has been reallocated in Release 6
*/
case lwc2_op:
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
break;
case ldc2_op:
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
break;
case swc2_op:
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
break;
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
#endif
default:
/*
		 * Pheeee... We encountered a yet unknown instruction or
* cache coherence problem. Die sucker, die ...
*/
goto sigill;
}
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS);
return;
sigill:
die_if_kernel
("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL);
}
/* Recode table from 16-bit register notation to 32-bit GPR. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
static void emulate_load_store_microMIPS(struct pt_regs *regs,
void __user *addr)
{
unsigned long value;
unsigned int res;
int i;
unsigned int reg = 0, rvar;
unsigned long orig31;
u16 __user *pc16;
u16 halfword;
unsigned int word;
unsigned long origpc, contpc;
union mips_instruction insn;
struct mm_decoded_insn mminsn;
bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
mminsn.micro_mips_mode = 1;
/*
* This load never faults.
*/
pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
__get_user(halfword, pc16);
pc16++;
contpc = regs->cp0_epc + 2;
word = ((unsigned int)halfword << 16);
mminsn.pc_inc = 2;
if (!mm_insn_16bit(halfword)) {
__get_user(halfword, pc16);
pc16++;
contpc = regs->cp0_epc + 4;
mminsn.pc_inc = 4;
word |= halfword;
}
mminsn.insn = word;
if (get_user(halfword, pc16))
goto fault;
mminsn.next_pc_inc = 2;
word = ((unsigned int)halfword << 16);
if (!mm_insn_16bit(halfword)) {
pc16++;
if (get_user(halfword, pc16))
goto fault;
mminsn.next_pc_inc = 4;
word |= halfword;
}
mminsn.next_insn = word;
insn = (union mips_instruction)(mminsn.insn);
if (mm_isBranchInstr(regs, mminsn, &contpc))
insn = (union mips_instruction)(mminsn.next_insn);
/* Parse instruction to find what to do */
switch (insn.mm_i_format.opcode) {
case mm_pool32a_op:
switch (insn.mm_x_format.func) {
case mm_lwxs_op:
reg = insn.mm_x_format.rd;
goto loadW;
}
goto sigbus;
case mm_pool32b_op:
switch (insn.mm_m_format.func) {
case mm_lwp_func:
reg = insn.mm_m_format.rd;
if (reg == 31)
goto sigbus;
if (user && !access_ok(addr, 8))
goto sigbus;
LoadW(addr, value, res);
if (res)
goto fault;
regs->regs[reg] = value;
addr += 4;
LoadW(addr, value, res);
if (res)
goto fault;
regs->regs[reg + 1] = value;
goto success;
case mm_swp_func:
reg = insn.mm_m_format.rd;
if (reg == 31)
goto sigbus;
if (user && !access_ok(addr, 8))
goto sigbus;
value = regs->regs[reg];
StoreW(addr, value, res);
if (res)
goto fault;
addr += 4;
value = regs->regs[reg + 1];
StoreW(addr, value, res);
if (res)
goto fault;
goto success;
case mm_ldp_func:
#ifdef CONFIG_64BIT
reg = insn.mm_m_format.rd;
if (reg == 31)
goto sigbus;
if (user && !access_ok(addr, 16))
goto sigbus;
LoadDW(addr, value, res);
if (res)
goto fault;
regs->regs[reg] = value;
addr += 8;
LoadDW(addr, value, res);
if (res)
goto fault;
regs->regs[reg + 1] = value;
goto success;
#endif /* CONFIG_64BIT */
goto sigill;
case mm_sdp_func:
#ifdef CONFIG_64BIT
reg = insn.mm_m_format.rd;
if (reg == 31)
goto sigbus;
if (user && !access_ok(addr, 16))
goto sigbus;
value = regs->regs[reg];
StoreDW(addr, value, res);
if (res)
goto fault;
addr += 8;
value = regs->regs[reg + 1];
StoreDW(addr, value, res);
if (res)
goto fault;
goto success;
#endif /* CONFIG_64BIT */
goto sigill;
case mm_lwm32_func:
reg = insn.mm_m_format.rd;
rvar = reg & 0xf;
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
if (user && !access_ok(addr, 4 * (rvar + 1)))
goto sigbus;
} else {
if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
}
if (rvar == 9)
rvar = 8;
for (i = 16; rvar; rvar--, i++) {
LoadW(addr, value, res);
if (res)
goto fault;
addr += 4;
regs->regs[i] = value;
}
if ((reg & 0xf) == 9) {
LoadW(addr, value, res);
if (res)
goto fault;
addr += 4;
regs->regs[30] = value;
}
if (reg & 0x10) {
LoadW(addr, value, res);
if (res)
goto fault;
regs->regs[31] = value;
}
goto success;
case mm_swm32_func:
reg = insn.mm_m_format.rd;
rvar = reg & 0xf;
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
if (user && !access_ok(addr, 4 * (rvar + 1)))
goto sigbus;
} else {
if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
}
if (rvar == 9)
rvar = 8;
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreW(addr, value, res);
if (res)
goto fault;
addr += 4;
}
if ((reg & 0xf) == 9) {
value = regs->regs[30];
StoreW(addr, value, res);
if (res)
goto fault;
addr += 4;
}
if (reg & 0x10) {
value = regs->regs[31];
StoreW(addr, value, res);
if (res)
goto fault;
}
goto success;
case mm_ldm_func:
#ifdef CONFIG_64BIT
reg = insn.mm_m_format.rd;
rvar = reg & 0xf;
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
if (user && !access_ok(addr, 8 * (rvar + 1)))
goto sigbus;
} else {
if (user && !access_ok(addr, 8 * rvar))
goto sigbus;
}
if (rvar == 9)
rvar = 8;
for (i = 16; rvar; rvar--, i++) {
LoadDW(addr, value, res);
if (res)
goto fault;
addr += 4;
regs->regs[i] = value;
}
if ((reg & 0xf) == 9) {
LoadDW(addr, value, res);
if (res)
goto fault;
addr += 8;
regs->regs[30] = value;
}
if (reg & 0x10) {
LoadDW(addr, value, res);
if (res)
goto fault;
regs->regs[31] = value;
}
goto success;
#endif /* CONFIG_64BIT */
goto sigill;
case mm_sdm_func:
#ifdef CONFIG_64BIT
reg = insn.mm_m_format.rd;
rvar = reg & 0xf;
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
if (user && !access_ok(addr, 8 * (rvar + 1)))
goto sigbus;
} else {
if (user && !access_ok(addr, 8 * rvar))
goto sigbus;
}
if (rvar == 9)
rvar = 8;
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreDW(addr, value, res);
if (res)
goto fault;
addr += 8;
}
if ((reg & 0xf) == 9) {
value = regs->regs[30];
StoreDW(addr, value, res);
if (res)
goto fault;
addr += 8;
}
if (reg & 0x10) {
value = regs->regs[31];
StoreDW(addr, value, res);
if (res)
goto fault;
}
goto success;
#endif /* CONFIG_64BIT */
goto sigill;
/* LWC2, SWC2, LDC2, SDC2 are not serviced */
}
goto sigbus;
case mm_pool32c_op:
switch (insn.mm_m_format.func) {
case mm_lwu_func:
reg = insn.mm_m_format.rd;
goto loadWU;
}
/* LL,SC,LLD,SCD are not serviced */
goto sigbus;
#ifdef CONFIG_MIPS_FP_SUPPORT
case mm_pool32f_op:
switch (insn.mm_x_format.func) {
case mm_lwxc1_func:
case mm_swxc1_func:
case mm_ldxc1_func:
case mm_sdxc1_func:
goto fpu_emul;
}
goto sigbus;
case mm_ldc132_op:
case mm_sdc132_op:
case mm_lwc132_op:
case mm_swc132_op: {
void __user *fault_addr = NULL;
fpu_emul:
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
BUG_ON(!is_fpu_owner());
		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
own_fpu(1); /* restore FPU state */
/* If something went wrong, signal */
process_fpemu_return(res, fault_addr, 0);
if (res == 0)
goto success;
return;
}
#endif /* CONFIG_MIPS_FP_SUPPORT */
case mm_lh32_op:
reg = insn.mm_i_format.rt;
goto loadHW;
case mm_lhu32_op:
reg = insn.mm_i_format.rt;
goto loadHWU;
case mm_lw32_op:
reg = insn.mm_i_format.rt;
goto loadW;
case mm_sh32_op:
reg = insn.mm_i_format.rt;
goto storeHW;
case mm_sw32_op:
reg = insn.mm_i_format.rt;
goto storeW;
case mm_ld32_op:
reg = insn.mm_i_format.rt;
goto loadDW;
case mm_sd32_op:
reg = insn.mm_i_format.rt;
goto storeDW;
case mm_pool16c_op:
switch (insn.mm16_m_format.func) {
case mm_lwm16_op:
reg = insn.mm16_m_format.rlist;
rvar = reg + 1;
if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
for (i = 16; rvar; rvar--, i++) {
LoadW(addr, value, res);
if (res)
goto fault;
addr += 4;
regs->regs[i] = value;
}
LoadW(addr, value, res);
if (res)
goto fault;
regs->regs[31] = value;
goto success;
case mm_swm16_op:
reg = insn.mm16_m_format.rlist;
rvar = reg + 1;
if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
for (i = 16; rvar; rvar--, i++) {
value = regs->regs[i];
StoreW(addr, value, res);
if (res)
goto fault;
addr += 4;
}
value = regs->regs[31];
StoreW(addr, value, res);
if (res)
goto fault;
goto success;
}
goto sigbus;
case mm_lhu16_op:
reg = reg16to32[insn.mm16_rb_format.rt];
goto loadHWU;
case mm_lw16_op:
reg = reg16to32[insn.mm16_rb_format.rt];
goto loadW;
case mm_sh16_op:
reg = reg16to32st[insn.mm16_rb_format.rt];
goto storeHW;
case mm_sw16_op:
reg = reg16to32st[insn.mm16_rb_format.rt];
goto storeW;
case mm_lwsp16_op:
reg = insn.mm16_r5_format.rt;
goto loadW;
case mm_swsp16_op:
reg = insn.mm16_r5_format.rt;
goto storeW;
case mm_lwgp16_op:
reg = reg16to32[insn.mm16_r3_format.rt];
goto loadW;
default:
goto sigill;
}
loadHW:
if (user && !access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
if (res)
goto fault;
regs->regs[reg] = value;
goto success;
loadHWU:
if (user && !access_ok(addr, 2))
goto sigbus;
LoadHWU(addr, value, res);
if (res)
goto fault;
regs->regs[reg] = value;
goto success;
loadW:
if (user && !access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
if (res)
goto fault;
regs->regs[reg] = value;
goto success;
loadWU:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 4))
goto sigbus;
LoadWU(addr, value, res);
if (res)
goto fault;
regs->regs[reg] = value;
goto success;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
loadDW:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 8))
goto sigbus;
LoadDW(addr, value, res);
if (res)
goto fault;
regs->regs[reg] = value;
goto success;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
storeHW:
if (user && !access_ok(addr, 2))
goto sigbus;
value = regs->regs[reg];
StoreHW(addr, value, res);
if (res)
goto fault;
goto success;
storeW:
if (user && !access_ok(addr, 4))
goto sigbus;
value = regs->regs[reg];
StoreW(addr, value, res);
if (res)
goto fault;
goto success;
storeDW:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 8))
goto sigbus;
value = regs->regs[reg];
StoreDW(addr, value, res);
if (res)
goto fault;
goto success;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
success:
regs->cp0_epc = contpc; /* advance or branch */
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS);
return;
sigill:
die_if_kernel
("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL);
}
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
unsigned long value;
unsigned int res;
int reg;
unsigned long orig31;
u16 __user *pc16;
unsigned long origpc;
union mips16e_instruction mips16inst, oldinst;
unsigned int opcode;
int extended = 0;
bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
/*
* This load never faults.
*/
__get_user(mips16inst.full, pc16);
oldinst = mips16inst;
/* skip EXTEND instruction */
if (mips16inst.ri.opcode == MIPS16e_extend_op) {
extended = 1;
pc16++;
__get_user(mips16inst.full, pc16);
} else if (delay_slot(regs)) {
/* skip jump instructions */
/* JAL/JALX are 32 bits but have OPCODE in first short int */
if (mips16inst.ri.opcode == MIPS16e_jal_op)
pc16++;
pc16++;
if (get_user(mips16inst.full, pc16))
goto sigbus;
}
opcode = mips16inst.ri.opcode;
switch (opcode) {
case MIPS16e_i64_op: /* I64 or RI64 instruction */
switch (mips16inst.i64.func) { /* I64/RI64 func field check */
case MIPS16e_ldpc_func:
case MIPS16e_ldsp_func:
reg = reg16to32[mips16inst.ri64.ry];
goto loadDW;
case MIPS16e_sdsp_func:
reg = reg16to32[mips16inst.ri64.ry];
goto writeDW;
case MIPS16e_sdrasp_func:
reg = 29; /* GPRSP */
goto writeDW;
}
goto sigbus;
case MIPS16e_swsp_op:
reg = reg16to32[mips16inst.ri.rx];
if (extended && cpu_has_mips16e2)
switch (mips16inst.ri.imm >> 5) {
case 0: /* SWSP */
case 1: /* SWGP */
break;
case 2: /* SHGP */
opcode = MIPS16e_sh_op;
break;
default:
goto sigbus;
}
break;
case MIPS16e_lwpc_op:
reg = reg16to32[mips16inst.ri.rx];
break;
case MIPS16e_lwsp_op:
reg = reg16to32[mips16inst.ri.rx];
if (extended && cpu_has_mips16e2)
switch (mips16inst.ri.imm >> 5) {
case 0: /* LWSP */
case 1: /* LWGP */
break;
case 2: /* LHGP */
opcode = MIPS16e_lh_op;
break;
case 4: /* LHUGP */
opcode = MIPS16e_lhu_op;
break;
default:
goto sigbus;
}
break;
case MIPS16e_i8_op:
if (mips16inst.i8.func != MIPS16e_swrasp_func)
goto sigbus;
reg = 29; /* GPRSP */
break;
default:
reg = reg16to32[mips16inst.rri.ry];
break;
}
switch (opcode) {
case MIPS16e_lb_op:
case MIPS16e_lbu_op:
case MIPS16e_sb_op:
goto sigbus;
case MIPS16e_lh_op:
if (user && !access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
if (res)
goto fault;
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
break;
case MIPS16e_lhu_op:
if (user && !access_ok(addr, 2))
goto sigbus;
LoadHWU(addr, value, res);
if (res)
goto fault;
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
break;
case MIPS16e_lw_op:
case MIPS16e_lwpc_op:
case MIPS16e_lwsp_op:
if (user && !access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
if (res)
goto fault;
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
break;
case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 4))
goto sigbus;
LoadWU(addr, value, res);
if (res)
goto fault;
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 8))
goto sigbus;
LoadDW(addr, value, res);
if (res)
goto fault;
MIPS16e_compute_return_epc(regs, &oldinst);
regs->regs[reg] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case MIPS16e_sh_op:
if (user && !access_ok(addr, 2))
goto sigbus;
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreHW(addr, value, res);
if (res)
goto fault;
break;
case MIPS16e_sw_op:
case MIPS16e_swsp_op:
case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
if (user && !access_ok(addr, 4))
goto sigbus;
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreW(addr, value, res);
if (res)
goto fault;
break;
case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (user && !access_ok(addr, 8))
goto sigbus;
MIPS16e_compute_return_epc(regs, &oldinst);
value = regs->regs[reg];
StoreDW(addr, value, res);
if (res)
goto fault;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
default:
/*
		 * Pheeee... We encountered a yet unknown instruction or
* cache coherence problem. Die sucker, die ...
*/
goto sigill;
}
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* roll back jump/branch */
regs->cp0_epc = origpc;
regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS);
return;
sigill:
die_if_kernel
("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL);
}
asmlinkage void do_ade(struct pt_regs *regs)
{
enum ctx_state prev_state;
unsigned int *pc;
prev_state = exception_enter();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, regs, regs->cp0_badvaddr);
#ifdef CONFIG_64BIT
/*
* check, if we are hitting space between CPU implemented maximum
* virtual user address and 64bit maximum virtual user address
* and do exception handling to get EFAULTs for get_user/put_user
*/
if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
(regs->cp0_badvaddr < XKSSEG)) {
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = regs->cp0_badvaddr;
return;
}
goto sigbus;
}
#endif
/*
* Did we catch a fault trying to load an instruction?
*/
if (regs->cp0_badvaddr == regs->cp0_epc)
goto sigbus;
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
/*
* Do branch emulation only if we didn't forward the exception.
	 * This is all so very ugly ...
*/
/*
* Are we running in microMIPS mode?
*/
if (get_isa16_mode(regs->cp0_epc)) {
/*
* Did we catch a fault trying to load an instruction in
* 16-bit mode?
*/
if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
if (cpu_has_mmips) {
emulate_load_store_microMIPS(regs,
(void __user *)regs->cp0_badvaddr);
return;
}
if (cpu_has_mips16) {
emulate_load_store_MIPS16e(regs,
(void __user *)regs->cp0_badvaddr);
return;
}
goto sigbus;
}
if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
pc = (unsigned int *)exception_epc(regs);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
return;
sigbus:
die_if_kernel("Kernel unaligned instruction access", regs);
force_sig(SIGBUS);
/*
* XXX On return from the signal handler we should advance the epc
*/
exception_exit(prev_state);
}
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
&unaligned_instructions);
debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
return 0;
}
arch_initcall(debugfs_unaligned);
#endif
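/*
 * Usage note (illustrative): with CONFIG_DEBUG_FS the two knobs above
 * appear under the MIPS debugfs directory, typically
 * /sys/kernel/debug/mips/, e.g.:
 *
 *	cat /sys/kernel/debug/mips/unaligned_instructions
 *	echo 2 > /sys/kernel/debug/mips/unaligned_action
 *
 * where 0 = quiet fixup, 1 = send a signal, 2 = fix up but show the
 * registers, matching the UNALIGNED_ACTION_* enum at the top of this
 * file.
 */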
| linux-master | arch/mips/kernel/unaligned.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GT641xx clockevent routines.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/gt64120.h>
#include <asm/time.h>
static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock);
static unsigned int gt641xx_base_clock;
void gt641xx_set_base_clock(unsigned int clock)
{
gt641xx_base_clock = clock;
}
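/*
 * Illustrative sketch (not part of this driver): the platform must
 * provide the timer input clock before initcall time, typically from its
 * plat_time_init() implementation.  The 50 MHz figure is just an
 * example.
 */
#if 0
void __init plat_time_init(void)
{
	gt641xx_set_base_clock(50000000);
}
#endif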
int gt641xx_timer0_state(void)
{
if (GT_READ(GT_TC0_OFS))
return 0;
GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK);
return 1;
}
static int gt641xx_timer0_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u32 ctrl;
	raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
ctrl |= GT_TC_CONTROL_ENTC0_MSK;
GT_WRITE(GT_TC0_OFS, delta);
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
	raw_spin_unlock(&gt641xx_timer_lock);
return 0;
}
static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
{
u32 ctrl;
	raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
	raw_spin_unlock(&gt641xx_timer_lock);
return 0;
}
static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
{
u32 ctrl;
	raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
ctrl |= GT_TC_CONTROL_ENTC0_MSK;
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
raw_spin_unlock(>641xx_timer_lock);
return 0;
}
static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
{
u32 ctrl;
	raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
raw_spin_unlock(>641xx_timer_lock);
return 0;
}
static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
{
}
static struct clock_event_device gt641xx_timer0_clockevent = {
.name = "gt641xx-timer0",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.irq = GT641XX_TIMER0_IRQ,
.set_next_event = gt641xx_timer0_set_next_event,
.set_state_shutdown = gt641xx_timer0_shutdown,
.set_state_periodic = gt641xx_timer0_set_periodic,
.set_state_oneshot = gt641xx_timer0_set_oneshot,
.tick_resume = gt641xx_timer0_shutdown,
.event_handler = gt641xx_timer0_event_handler,
};
static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *cd = &gt641xx_timer0_clockevent;
cd->event_handler(cd);
return IRQ_HANDLED;
}
static int __init gt641xx_timer0_clockevent_init(void)
{
struct clock_event_device *cd;
if (!gt641xx_base_clock)
return 0;
GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
	cd = &gt641xx_timer0_clockevent;
cd->rating = 200 + gt641xx_base_clock / 10000000;
clockevent_set_clock(cd, gt641xx_base_clock);
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->max_delta_ticks = 0x7fffffff;
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->min_delta_ticks = 0x300;
cd->cpumask = cpumask_of(0);
	clockevents_register_device(&gt641xx_timer0_clockevent);
return request_irq(GT641XX_TIMER0_IRQ, gt641xx_timer0_interrupt,
IRQF_PERCPU | IRQF_TIMER, "gt641xx_timer0", NULL);
}
arch_initcall(gt641xx_timer0_clockevent_init);
| linux-master | arch/mips/kernel/cevt-gt641xx.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006, 07 by Ralf Baechle ([email protected])
*
* Symmetric Uniprocessor (TM) Support
*/
#include <linux/kernel.h>
#include <linux/sched.h>
/*
* Send inter-processor interrupt
*/
static void up_send_ipi_single(int cpu, unsigned int action)
{
	panic("%s called", __func__);	/* panic() takes no printk log level */
}
static inline void up_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
	panic("%s called", __func__);	/* panic() takes no printk log level */
}
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
static void up_init_secondary(void)
{
}
static void up_smp_finish(void)
{
}
/*
* Firmware CPU startup hook
*/
static int up_boot_secondary(int cpu, struct task_struct *idle)
{
return 0;
}
static void __init up_smp_setup(void)
{
}
static void __init up_prepare_cpus(unsigned int max_cpus)
{
}
#ifdef CONFIG_HOTPLUG_CPU
static int up_cpu_disable(void)
{
return -ENOSYS;
}
static void up_cpu_die(unsigned int cpu)
{
BUG();
}
#endif
const struct plat_smp_ops up_smp_ops = {
.send_ipi_single = up_send_ipi_single,
.send_ipi_mask = up_send_ipi_mask,
.init_secondary = up_init_secondary,
.smp_finish = up_smp_finish,
.boot_secondary = up_boot_secondary,
.smp_setup = up_smp_setup,
.prepare_cpus = up_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = up_cpu_disable,
.cpu_die = up_cpu_die,
#endif
};
| linux-master | arch/mips/kernel/smp-up.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2001 MontaVista Software Inc.
* Author: Jun Sun, [email protected] or [email protected]
* Copyright (c) 2003, 2004 Maciej W. Rozycki
*
* Common time service routines for MIPS machines.
*/
#include <linux/bug.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>
#ifdef CONFIG_CPU_FREQ
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
static unsigned long glb_lpj_ref;
static unsigned long glb_lpj_ref_freq;
static int cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
struct cpumask *cpus = freq->policy->cpus;
unsigned long lpj;
int cpu;
/*
	 * Skip adjusting the lpj numbers if the CPU-freq transition is safe
	 * for the delay loops. (Is this possible?)
*/
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
	/* Save the initial lpj values for future scaling. */
if (!glb_lpj_ref) {
glb_lpj_ref = boot_cpu_data.udelay_val;
glb_lpj_ref_freq = freq->old;
for_each_online_cpu(cpu) {
per_cpu(pcp_lpj_ref, cpu) =
cpu_data[cpu].udelay_val;
per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
}
}
/*
* Adjust global lpj variable and per-CPU udelay_val number in
* accordance with the new CPU frequency.
*/
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
glb_lpj_ref_freq,
freq->new);
for_each_cpu(cpu, cpus) {
lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
per_cpu(pcp_lpj_ref_freq, cpu),
freq->new);
cpu_data[cpu].udelay_val = (unsigned int)lpj;
}
}
return NOTIFY_OK;
}
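/*
 * Worked example of the scaling above (illustrative numbers): with a
 * reference udelay_val of 4990464 calibrated at 1000000 kHz, switching
 * to 500000 kHz gives cpufreq_scale(4990464, 1000000, 500000) = 2495232,
 * i.e. the delay-loop count halves along with the clock.
 */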
static struct notifier_block cpufreq_notifier = {
.notifier_call = cpufreq_callback,
};
static int __init register_cpufreq_notifier(void)
{
return cpufreq_register_notifier(&cpufreq_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);
#endif /* CONFIG_CPU_FREQ */
/*
* forward reference
*/
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
static int null_perf_irq(void)
{
return 0;
}
int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);
/*
* time_init() - it does the following things.
*
* 1) plat_time_init() -
* a) (optional) set up RTC routines,
* b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
* 2) calculate a couple of cached variables for later usage
*/
unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);
static __init int cpu_has_mfc0_count_bug(void)
{
switch (current_cpu_type()) {
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4000MC:
/*
* V3.0 is documented as suffering from the mfc0 from count bug.
* Afaik this is the last version of the R4000. Later versions
* were marketed as R4400.
*/
return 1;
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
/*
* The published errata for the R4400 up to 3.0 say the CPU
* has the mfc0 from count bug. This seems the last version
* produced.
*/
return 1;
}
return 0;
}
void __init time_init(void)
{
plat_time_init();
/*
* The use of the R4k timer as a clock event takes precedence;
* if reading the Count register might interfere with the timer
* interrupt, then we don't use the timer as a clock source.
* We may still use the timer as a clock source though if the
* timer interrupt isn't reliable; the interference doesn't
* matter then, because we don't use the interrupt.
*/
if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
init_mips_clocksource();
}
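/*
 * Illustrative sketch (not part of this file): the minimal duty of a
 * platform's plat_time_init() when the CP0 count/compare timer is the
 * clock event source is to set mips_hpt_frequency; the 200 MHz figure
 * is just an example.
 */
#if 0
void __init plat_time_init(void)
{
	mips_hpt_frequency = 200000000;
}
#endif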
| linux-master | arch/mips/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* machine_kexec.c for kexec
* Created by <[email protected]> on Thu Oct 12 15:15:06 2006
*/
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
static unsigned long reboot_code_buffer;
#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
pr_debug("kexec kimage info:\n");
pr_debug(" type: %d\n", kimage->type);
pr_debug(" start: %lx\n", kimage->start);
pr_debug(" head: %lx\n", kimage->head);
pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
i,
kimage->segment[i].mem,
kimage->segment[i].mem + kimage->segment[i].memsz,
(unsigned long)kimage->segment[i].memsz,
(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
}
}
#ifdef CONFIG_UHI_BOOT
static int uhi_machine_kexec_prepare(struct kimage *kimage)
{
int i;
/*
* In case DTB file is not passed to the new kernel, a flat device
* tree will be created by kexec tool. It holds modified command
* line for the new kernel.
*/
for (i = 0; i < kimage->nr_segments; i++) {
struct fdt_header fdt;
if (kimage->segment[i].memsz <= sizeof(fdt))
continue;
if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
continue;
if (fdt_check_header(&fdt))
continue;
kexec_args[0] = -2;
kexec_args[1] = (unsigned long)
phys_to_virt((unsigned long)kimage->segment[i].mem);
break;
}
return 0;
}
int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;
#else
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
#endif /* CONFIG_UHI_BOOT */
int
machine_kexec_prepare(struct kimage *kimage)
{
#ifdef CONFIG_SMP
if (!kexec_nonboot_cpu_func())
return -EINVAL;
#endif
kexec_image_info(kimage);
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);
return 0;
}
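/*
 * Illustrative sketch (not part of this file): platform code may install
 * its own prepare hook instead of the UHI one, e.g. to validate segments
 * or marshal boot arguments.  Only the _machine_kexec_prepare pointer is
 * taken from this file; the function bodies are made up.
 */
#if 0
static int my_plat_kexec_prepare(struct kimage *kimage)
{
	/* validate segments / stash boot arguments here */
	return 0;
}

static int __init my_plat_kexec_init(void)
{
	_machine_kexec_prepare = my_plat_kexec_prepare;
	return 0;
}
#endif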
void
machine_kexec_cleanup(struct kimage *kimage)
{
}
#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *param)
{
int cpu = smp_processor_id();
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
/* NOTREACHED */
}
#endif
void
machine_shutdown(void)
{
if (_machine_kexec_shutdown)
_machine_kexec_shutdown();
#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
while (num_online_cpus() > 1) {
cpu_relax();
mdelay(1);
}
#endif
}
void
machine_crash_shutdown(struct pt_regs *regs)
{
if (_machine_crash_shutdown)
_machine_crash_shutdown(regs);
else
default_machine_crash_shutdown(regs);
}
#ifdef CONFIG_SMP
void kexec_nonboot_cpu_jump(void)
{
local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
reboot_code_buffer + relocate_new_kernel_size);
relocated_kexec_smp_wait(NULL);
}
#endif
void kexec_reboot(void)
{
void (*do_kexec)(void) __noreturn;
/*
* We know we were online, and there will be no incoming IPIs at
* this point. Mark online again before rebooting so that the crash
* analysis tool will see us correctly.
*/
set_cpu_online(smp_processor_id(), true);
/* Ensure remote CPUs observe that we're online before rebooting. */
smp_mb__after_atomic();
#ifdef CONFIG_SMP
if (smp_processor_id() > 0) {
/*
		 * Instead of cpu_relax() or wait, this is needed for a kexec
		 * SMP reboot. Kdump usually doesn't require an SMP-capable
		 * new kernel, but kexec may.
*/
kexec_nonboot_cpu();
/* NOTREACHED */
}
#endif
/*
* Make sure we get correct instructions written by the
* machine_kexec() CPU.
*/
local_flush_icache_range(reboot_code_buffer,
reboot_code_buffer + relocate_new_kernel_size);
do_kexec = (void *)reboot_code_buffer;
do_kexec();
}
void
machine_kexec(struct kimage *image)
{
unsigned long entry;
unsigned long *ptr;
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
kexec_start_address =
(unsigned long) phys_to_virt(image->start);
if (image->type == KEXEC_TYPE_DEFAULT) {
kexec_indirection_page =
(unsigned long) phys_to_virt(image->head & PAGE_MASK);
} else {
kexec_indirection_page = (unsigned long)&image->head;
}
memcpy((void*)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
/*
* The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS if on a 64-bit system), hence the
* phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
/* Mark offline BEFORE disabling local irq. */
set_cpu_online(smp_processor_id(), false);
/*
* we do not want to be bothered.
*/
local_irq_disable();
printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
/* Make reboot code buffer available to the boot CPU. */
__flush_cache_all();
#ifdef CONFIG_SMP
/* All secondary cpus now may jump to kexec_wait cycle */
relocated_kexec_smp_wait = reboot_code_buffer +
(void *)(kexec_smp_wait - relocate_new_kernel);
smp_wmb();
atomic_set(&kexec_ready_to_reboot, 1);
#endif
kexec_reboot();
}
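/*
 * For reference (illustrative): the page list walked above is a sequence
 * of words tagged in their low bits by the generic kexec code:
 *
 *	IND_DESTINATION	- next destination page
 *	IND_INDIRECTION	- continue at this indirection page
 *	IND_SOURCE	- copy this source page to the destination
 *	IND_DONE	- end of the list
 *
 * which is why the loop converts every tagged physical address with
 * phys_to_virt() before the relocation stub consumes the list.
 */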
| linux-master | arch/mips/kernel/machine_kexec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*/
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>
#include <asm/stacktrace.h>
/* Callchain handling code. */
/*
* Leave userspace callchain empty for now. When we find a way to trace
* the user stack callchains, we will add it here.
*/
static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
perf_callchain_store(entry, addr);
if (entry->nr >= entry->max_stack)
break;
}
}
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_perf_callchain(entry, sp);
return;
}
do {
perf_callchain_store(entry, pc);
if (entry->nr >= entry->max_stack)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
save_raw_perf_callchain(entry, sp);
#endif
}
| linux-master | arch/mips/kernel/perf_event.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 by Kevin Cernekee ([email protected])
*
* SMP support for BMIPS
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <asm/time.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mipsregs.h>
#include <asm/bmips.h>
#include <asm/traps.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
static int __maybe_unused max_cpus = 1;
/* these may be configured by the platform code */
int bmips_smp_enabled = 1;
int bmips_cpu_offset;
cpumask_t bmips_booted_mask;
unsigned long bmips_tp1_irqs = IE_IRQ1;
#define RESET_FROM_KSEG0 0x80080800
#define RESET_FROM_KSEG1 0xa0080800
static void bmips_set_reset_vec(int cpu, u32 val);
#ifdef CONFIG_SMP
#include <asm/smp.h>
/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
unsigned long bmips_smp_boot_sp;
unsigned long bmips_smp_boot_gp;
static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
static void bmips5000_send_ipi_single(int cpu, unsigned int action);
static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
/* SW interrupts 0,1 are used for interprocessor signaling */
#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1)
#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
static void __init bmips_smp_setup(void)
{
int i, cpu = 1, boot_cpu = 0;
int cpu_hw_intr;
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
/* arbitration priority */
clear_c0_brcm_cmt_ctrl(0x30);
/* NBK and weak order flags */
set_c0_brcm_config_0(0x30000);
/* Find out if we are running on TP0 or TP1 */
boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
/*
* MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
* thread
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
*/
if (boot_cpu == 0)
cpu_hw_intr = 0x02;
else
cpu_hw_intr = 0x1d;
change_c0_brcm_cmt_intr(0xf8018000,
(cpu_hw_intr << 27) | (0x03 << 15));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
break;
case CPU_BMIPS5000:
/* enable raceless SW interrupts */
set_c0_brcm_config(0x03 << 22);
/* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
/* N cores, 2 threads per core */
max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
/* clear any pending SW interrupts */
for (i = 0; i < max_cpus; i++) {
write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
}
break;
default:
max_cpus = 1;
}
if (!bmips_smp_enabled)
max_cpus = 1;
/* this can be overridden by the BSP */
if (!board_ebase_setup)
board_ebase_setup = &bmips_ebase_setup;
if (max_cpus > 1) {
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
for (i = 0; i < max_cpus; i++) {
if (i != boot_cpu) {
__cpu_number_map[i] = cpu;
__cpu_logical_map[cpu] = i;
cpu++;
}
set_cpu_possible(i, 1);
set_cpu_present(i, 1);
}
} else {
__cpu_number_map[0] = boot_cpu;
__cpu_logical_map[0] = 0;
set_cpu_possible(0, 1);
set_cpu_present(0, 1);
}
}
/*
* IPI IRQ setup - runs on CPU0
*/
static void bmips_prepare_cpus(unsigned int max_cpus)
{
irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
break;
case CPU_BMIPS5000:
bmips_ipi_interrupt = bmips5000_ipi_interrupt;
break;
default:
return;
}
if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
panic("Can't request IPI0 interrupt");
if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
panic("Can't request IPI1 interrupt");
}
/*
* Tell the hardware to boot CPUx - runs on CPU0
*/
static int bmips_boot_secondary(int cpu, struct task_struct *idle)
{
bmips_smp_boot_sp = __KSTK_TOS(idle);
bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
mb();
/*
* Initial boot sequence for secondary CPU:
* bmips_reset_nmi_vec @ a000_0000 ->
* bmips_smp_entry ->
* plat_wired_tlb_setup (cached function call; optional) ->
* start_secondary (cached jump)
*
* Warm restart sequence:
* play_dead WAIT loop ->
* bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
* eret to play_dead ->
* bmips_secondary_reentry ->
* start_secondary
*/
pr_info("SMP: Booting CPU%d...\n", cpu);
if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
/* kseg1 might not exist if this CPU enabled XKS01 */
bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
bmips43xx_send_ipi_single(cpu, 0);
break;
case CPU_BMIPS5000:
bmips5000_send_ipi_single(cpu, 0);
break;
}
} else {
bmips_set_reset_vec(cpu, RESET_FROM_KSEG1);
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
/* Reset slave TP1 if booting from TP0 */
if (cpu_logical_map(cpu) == 1)
set_c0_brcm_cmt_ctrl(0x01);
break;
case CPU_BMIPS5000:
write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
break;
}
cpumask_set_cpu(cpu, &bmips_booted_mask);
}
return 0;
}
/*
* Early setup - runs on secondary CPU after cache probe
*/
static void bmips_init_secondary(void)
{
bmips_cpu_setup();
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
break;
case CPU_BMIPS5000:
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
		cpu_set_core(&current_cpu_data, (read_c0_brcm_config() >> 25) & 3);
break;
}
}
/*
* Late setup - runs on secondary CPU before entering the idle loop
*/
static void bmips_smp_finish(void)
{
pr_info("SMP: CPU%d is running\n", smp_processor_id());
/* make sure there won't be a timer interrupt for a little while */
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
irq_enable_hazard();
set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE);
irq_enable_hazard();
}
/*
* BMIPS5000 raceless IPIs
*
* Each CPU has two inbound SW IRQs which are independent of all other CPUs.
* IPI0 is used for SMP_RESCHEDULE_YOURSELF
* IPI1 is used for SMP_CALL_FUNCTION
*/
static void bmips5000_send_ipi_single(int cpu, unsigned int action)
{
write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
}
static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
{
int action = irq - IPI0_IRQ;
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));
if (action == 0)
scheduler_ipi();
else
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static void bmips5000_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
bmips5000_send_ipi_single(i, action);
}
/*
* BMIPS43xx racey IPIs
*
* We use one inbound SW IRQ for each CPU.
*
* A spinlock must be held in order to keep CPUx from accidentally clearing
* an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The
* same spinlock is used to protect the action masks.
*/
static DEFINE_SPINLOCK(ipi_lock);
static DEFINE_PER_CPU(int, ipi_action_mask);
static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
{
unsigned long flags;
spin_lock_irqsave(&ipi_lock, flags);
set_c0_cause(cpu ? C_SW1 : C_SW0);
per_cpu(ipi_action_mask, cpu) |= action;
irq_enable_hazard();
spin_unlock_irqrestore(&ipi_lock, flags);
}
static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
{
unsigned long flags;
int action, cpu = irq - IPI0_IRQ;
spin_lock_irqsave(&ipi_lock, flags);
action = __this_cpu_read(ipi_action_mask);
per_cpu(ipi_action_mask, cpu) = 0;
clear_c0_cause(cpu ? C_SW1 : C_SW0);
spin_unlock_irqrestore(&ipi_lock, flags);
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
bmips43xx_send_ipi_single(i, action);
}
#ifdef CONFIG_HOTPLUG_CPU
static int bmips_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
pr_info("SMP: CPU%d is offline\n", cpu);
set_cpu_online(cpu, false);
calculate_cpu_foreign_map();
irq_migrate_all_off_this_cpu();
clear_c0_status(IE_IRQ5);
local_flush_tlb_all();
local_flush_icache_range(0, ~0);
return 0;
}
static void bmips_cpu_die(unsigned int cpu)
{
}
void __ref play_dead(void)
{
idle_task_exit();
cpuhp_ap_report_dead();
/* flush data cache */
_dma_cache_wback_inv(0, ~0);
/*
* Wakeup is on SW0 or SW1; disable everything else
* Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
* IRQ handlers; this clears ST0_IE and returns immediately.
*/
clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
change_c0_status(
IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
irq_disable_hazard();
/*
* wait for SW interrupt from bmips_boot_secondary(), then jump
* back to start_secondary()
*/
__asm__ __volatile__(
" wait\n"
" j bmips_secondary_reentry\n"
: : : "memory");
BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
const struct plat_smp_ops bmips43xx_smp_ops = {
.smp_setup = bmips_smp_setup,
.prepare_cpus = bmips_prepare_cpus,
.boot_secondary = bmips_boot_secondary,
.smp_finish = bmips_smp_finish,
.init_secondary = bmips_init_secondary,
.send_ipi_single = bmips43xx_send_ipi_single,
.send_ipi_mask = bmips43xx_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
const struct plat_smp_ops bmips5000_smp_ops = {
.smp_setup = bmips_smp_setup,
.prepare_cpus = bmips_prepare_cpus,
.boot_secondary = bmips_boot_secondary,
.smp_finish = bmips_smp_finish,
.init_secondary = bmips_init_secondary,
.send_ipi_single = bmips5000_send_ipi_single,
.send_ipi_mask = bmips5000_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
#endif /* CONFIG_SMP */
/***********************************************************************
* BMIPS vector relocation
* This is primarily used for SMP boot, but it is applicable to some
* UP BMIPS systems as well.
***********************************************************************/
static void bmips_wr_vec(unsigned long dst, char *start, char *end)
{
memcpy((void *)dst, start, end - start);
dma_cache_wback(dst, end - start);
local_flush_icache_range(dst, dst + (end - start));
instruction_hazard();
}
static inline void bmips_nmi_handler_setup(void)
{
bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
bmips_reset_nmi_vec_end);
bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
bmips_smp_int_vec_end);
}
struct reset_vec_info {
int cpu;
u32 val;
};
static void bmips_set_reset_vec_remote(void *vinfo)
{
struct reset_vec_info *info = vinfo;
int shift = info->cpu & 0x01 ? 16 : 0;
u32 mask = ~(0xffff << shift), val = info->val >> 16;
preempt_disable();
if (smp_processor_id() > 0) {
smp_call_function_single(0, &bmips_set_reset_vec_remote,
info, 1);
} else {
if (info->cpu & 0x02) {
/* BMIPS5200 "should" use mask/shift, but it's buggy */
bmips_write_zscm_reg(0xa0, (val << 16) | val);
bmips_read_zscm_reg(0xa0);
} else {
write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) |
(val << shift));
}
}
preempt_enable();
}
static void bmips_set_reset_vec(int cpu, u32 val)
{
struct reset_vec_info info;
if (current_cpu_type() == CPU_BMIPS5000) {
/* this needs to run from CPU0 (which is always online) */
info.cpu = cpu;
info.val = val;
bmips_set_reset_vec_remote(&info);
} else {
void __iomem *cbr = BMIPS_GET_CBR();
if (cpu == 0)
__raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
else {
if (current_cpu_type() != CPU_BMIPS4380)
return;
__raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
}
}
__sync();
back_to_back_c0_hazard();
}
void bmips_ebase_setup(void)
{
unsigned long new_ebase = ebase;
BUG_ON(ebase != CKSEG0);
switch (current_cpu_type()) {
case CPU_BMIPS4350:
/*
* BMIPS4350 cannot relocate the normal vectors, but it
* can relocate the BEV=1 vectors. So CPU1 starts up at
* the relocated BEV=1, IV=0 general exception vector @
* 0xa000_0380.
*
* set_uncached_handler() is used here because:
* - CPU1 will run this from uncached space
* - None of the cacheflush functions are set up yet
*/
set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
&bmips_smp_int_vec, 0x80);
__sync();
return;
case CPU_BMIPS3300:
case CPU_BMIPS4380:
/*
* 0x8000_0000: reset/NMI (initially in kseg1)
* 0x8000_0400: normal vectors
*/
new_ebase = 0x80000400;
bmips_set_reset_vec(0, RESET_FROM_KSEG0);
break;
case CPU_BMIPS5000:
/*
* 0x8000_0000: reset/NMI (initially in kseg1)
* 0x8000_1000: normal vectors
*/
new_ebase = 0x80001000;
bmips_set_reset_vec(0, RESET_FROM_KSEG0);
write_c0_ebase(new_ebase);
break;
default:
return;
}
board_nmi_handler_setup = &bmips_nmi_handler_setup;
ebase = new_ebase;
}
asmlinkage void __weak plat_wired_tlb_setup(void)
{
/*
* Called when starting/restarting a secondary CPU.
* Kernel stacks and other important data might only be accessible
* once the wired entries are present.
*/
}
void bmips_cpu_setup(void)
{
void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
u32 __maybe_unused cfg;
switch (current_cpu_type()) {
case CPU_BMIPS3300:
/* Set BIU to async mode */
set_c0_brcm_bus_pll(BIT(22));
__sync();
/* put the BIU back in sync mode */
clear_c0_brcm_bus_pll(BIT(22));
/* clear BHTD to enable branch history table */
clear_c0_brcm_reset(BIT(16));
/* Flush and enable RAC */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
__raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
__raw_writel(cfg | 0x0fff0000, cbr + BMIPS_RAC_ADDRESS_RANGE);
__raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
break;
case CPU_BMIPS4380:
/* CBG workaround for early BMIPS4380 CPUs */
switch (read_c0_prid()) {
case 0x2a040:
case 0x2a042:
case 0x2a044:
case 0x2a060:
cfg = __raw_readl(cbr + BMIPS_L2_CONFIG);
__raw_writel(cfg & ~0x07000000, cbr + BMIPS_L2_CONFIG);
__raw_readl(cbr + BMIPS_L2_CONFIG);
}
/* clear BHTD to enable branch history table */
clear_c0_brcm_config_0(BIT(21));
/* XI/ROTR enable */
set_c0_brcm_config_0(BIT(23));
set_c0_brcm_cmt_ctrl(BIT(15));
break;
case CPU_BMIPS5000:
/* enable RDHWR, BRDHWR */
set_c0_brcm_config(BIT(17) | BIT(21));
/* Disable JTB */
__asm__ __volatile__(
" .set noreorder\n"
" li $8, 0x5a455048\n"
" .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
" .word 0x4008b008\n" /* mfc0 t0, $22, 8 */
" li $9, 0x00008000\n"
" or $8, $8, $9\n"
" .word 0x4088b008\n" /* mtc0 t0, $22, 8 */
" sync\n"
" li $8, 0x0\n"
" .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
" .set reorder\n"
: : : "$8", "$9");
/* XI enable */
set_c0_brcm_config(BIT(27));
/* enable MIPS32R2 ROR instruction for XI TLB handlers */
__asm__ __volatile__(
" li $8, 0x5a455048\n"
" .word 0x4088b00f\n" /* mtc0 $8, $22, 15 */
" nop; nop; nop\n"
" .word 0x4008b008\n" /* mfc0 $8, $22, 8 */
" lui $9, 0x0100\n"
" or $8, $9\n"
" .word 0x4088b008\n" /* mtc0 $8, $22, 8 */
: : : "$8", "$9");
break;
}
}
| linux-master | arch/mips/kernel/smp-bmips.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2010 Cavium Networks, Inc.
*/
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
/*
* Define parameters for the standard MIPS and the microMIPS jump
* instruction encoding respectively:
*
* - the ISA bit of the target, either 0 or 1 respectively,
*
* - the amount the jump target address is shifted right to fit in the
* immediate field of the machine instruction, either 2 or 1,
*
* - the mask determining the size of the jump region relative to the
* delay-slot instruction, either 256MB or 128MB,
*
* - the jump target alignment, either 4 or 2 bytes.
*/
#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS)
#define J_RANGE_SHIFT (2 - J_ISA_BIT)
#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1)
#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1)
void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
{
union mips_instruction *insn_p;
union mips_instruction insn;
long offset;
insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
/* Target must have the right alignment and ISA must be preserved. */
BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
if (type == JUMP_LABEL_JMP) {
if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) {
offset = e->target - ((unsigned long)insn_p + 4);
offset >>= 2;
/*
* The branch offset must fit in the instruction's 26
* bit field.
*/
WARN_ON((offset >= (long)BIT(25)) ||
(offset < -(long)BIT(25)));
insn.j_format.opcode = bc6_op;
insn.j_format.target = offset;
} else {
/*
			 * A jump only reaches targets within the aligned
			 * region that contains its delay slot.
*/
WARN_ON((e->target & ~J_RANGE_MASK) !=
((e->code + 4) & ~J_RANGE_MASK));
insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
insn.j_format.target = e->target >> J_RANGE_SHIFT;
}
} else {
insn.word = 0; /* nop */
}
mutex_lock(&text_mutex);
if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
insn_p->halfword[0] = insn.word >> 16;
insn_p->halfword[1] = insn.word;
} else
*insn_p = insn;
flush_icache_range((unsigned long)insn_p,
(unsigned long)insn_p + sizeof(*insn_p));
mutex_unlock(&text_mutex);
}
#ifdef CONFIG_MODULES
void jump_label_apply_nops(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
/* if the module doesn't have jump label entries, just return */
if (iter_start == iter_stop)
return;
for (iter = iter_start; iter < iter_stop; iter++) {
/* Only write NOPs for arch_branch_static(). */
if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
arch_jump_label_transform(iter, JUMP_LABEL_NOP);
}
}
#endif
| linux-master | arch/mips/kernel/jump_label.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include "probes-common.h"
/*
* Calculate and return exception PC in case of branch delay slot
* for microMIPS and MIPS16e. It does not clear the ISA mode bit.
*/
int __isa_exception_epc(struct pt_regs *regs)
{
unsigned short inst;
long epc = regs->cp0_epc;
/* Calculate exception PC in branch delay slot. */
if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
/* This should never happen because delay slot was checked. */
force_sig(SIGSEGV);
return epc;
}
if (cpu_has_mips16) {
union mips16e_instruction inst_mips16e;
inst_mips16e.full = inst;
if (inst_mips16e.ri.opcode == MIPS16e_jal_op)
epc += 4;
else
epc += 2;
} else if (mm_insn_16bit(inst))
epc += 2;
else
epc += 4;
return epc;
}
/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
int __maybe_unused bc_false = 0;
if (!cpu_has_mmips)
return 0;
switch (insn.mm_i_format.opcode) {
case mm_pool32a_op:
if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
mm_pool32axf_op) {
switch (insn.mm_i_format.simmediate >>
MM_POOL32A_MINOR_SHIFT) {
case mm_jalr_op:
case mm_jalrhb_op:
case mm_jalrs_op:
case mm_jalrshb_op:
if (insn.mm_i_format.rt != 0) /* Not mm_jr */
regs->regs[insn.mm_i_format.rt] =
regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
}
break;
case mm_pool32i_op:
switch (insn.mm_i_format.rt) {
case mm_bltzals_op:
case mm_bltzal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
fallthrough;
case mm_bltz_op:
if ((long)regs->regs[insn.mm_i_format.rs] < 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgezals_op:
case mm_bgezal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
fallthrough;
case mm_bgez_op:
if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_blez_op:
if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgtz_op:
if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
#ifdef CONFIG_MIPS_FP_SUPPORT
case mm_bc2f_op:
case mm_bc1f_op: {
unsigned int fcr31;
unsigned int bit;
bc_false = 1;
fallthrough;
case mm_bc2t_op:
case mm_bc1t_op:
preempt_disable();
if (is_fpu_owner())
fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
if (bc_false)
fcr31 = ~fcr31;
bit = (insn.mm_i_format.rs >> 2);
bit += (bit != 0);
bit += 23;
if (fcr31 & (1 << bit))
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
}
#endif /* CONFIG_MIPS_FP_SUPPORT */
}
break;
case mm_pool16c_op:
switch (insn.mm_i_format.rt) {
case mm_jalr16_op:
case mm_jalrs16_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
fallthrough;
case mm_jr16_op:
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
break;
case mm_beqz16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_bnez16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_b16_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc +
(insn.mm_b0_format.simmediate << 1);
return 1;
case mm_beq32_op:
if (regs->regs[insn.mm_i_format.rs] ==
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bne32_op:
if (regs->regs[insn.mm_i_format.rs] !=
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_jalx32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 28;
*contpc <<= 28;
*contpc |= (insn.j_format.target << 2);
return 1;
case mm_jals32_op:
case mm_jal32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
fallthrough;
case mm_j32_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 27;
*contpc <<= 27;
*contpc |= (insn.j_format.target << 1);
set_isa16_mode(*contpc);
return 1;
}
return 0;
}
/*
* Compute return address and emulate branch in microMIPS mode after an
* exception only. It does not handle compact branches/jumps and cannot
* be used in interrupt context. (Compact branches/jumps do not cause
* exceptions.)
*/
int __microMIPS_compute_return_epc(struct pt_regs *regs)
{
u16 __user *pc16;
u16 halfword;
unsigned int word;
unsigned long contpc;
struct mm_decoded_insn mminsn = { 0 };
mminsn.micro_mips_mode = 1;
/* This load never faults. */
pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
__get_user(halfword, pc16);
pc16++;
contpc = regs->cp0_epc + 2;
word = ((unsigned int)halfword << 16);
mminsn.pc_inc = 2;
if (!mm_insn_16bit(halfword)) {
__get_user(halfword, pc16);
pc16++;
contpc = regs->cp0_epc + 4;
mminsn.pc_inc = 4;
word |= halfword;
}
mminsn.insn = word;
if (get_user(halfword, pc16))
goto sigsegv;
mminsn.next_pc_inc = 2;
word = ((unsigned int)halfword << 16);
if (!mm_insn_16bit(halfword)) {
pc16++;
if (get_user(halfword, pc16))
goto sigsegv;
mminsn.next_pc_inc = 4;
word |= halfword;
}
mminsn.next_insn = word;
mm_isBranchInstr(regs, mminsn, &contpc);
regs->cp0_epc = contpc;
return 0;
sigsegv:
force_sig(SIGSEGV);
return -EFAULT;
}
/*
* Compute return address and emulate branch in MIPS16e mode after an
* exception only. It does not handle compact branches/jumps and cannot
* be used in interrupt context. (Compact branches/jumps do not cause
* exceptions.)
*/
int __MIPS16e_compute_return_epc(struct pt_regs *regs)
{
u16 __user *addr;
union mips16e_instruction inst;
u16 inst2;
u32 fullinst;
long epc;
epc = regs->cp0_epc;
/* Read the instruction. */
addr = (u16 __user *)msk_isa16_mode(epc);
if (__get_user(inst.full, addr)) {
force_sig(SIGSEGV);
return -EFAULT;
}
switch (inst.ri.opcode) {
case MIPS16e_extend_op:
regs->cp0_epc += 4;
return 0;
/*
* JAL and JALX in MIPS16e mode
*/
case MIPS16e_jal_op:
addr += 1;
if (__get_user(inst2, addr)) {
force_sig(SIGSEGV);
return -EFAULT;
}
fullinst = ((unsigned)inst.full << 16) | inst2;
regs->regs[31] = epc + 6;
epc += 4;
epc >>= 28;
epc <<= 28;
/*
* JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16
*
* ......TARGET[15:0].................TARGET[20:16]...........
* ......TARGET[25:21]
*/
epc |=
((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
((fullinst & 0x1f0000) << 7);
if (!inst.jal.x)
set_isa16_mode(epc); /* Set ISA mode bit. */
regs->cp0_epc = epc;
return 0;
/*
* J(AL)R(C)
*/
case MIPS16e_rr_op:
if (inst.rr.func == MIPS16e_jr_func) {
if (inst.rr.ra)
regs->cp0_epc = regs->regs[31];
else
regs->cp0_epc =
regs->regs[reg16to32[inst.rr.rx]];
if (inst.rr.l) {
if (inst.rr.nd)
regs->regs[31] = epc + 2;
else
regs->regs[31] = epc + 4;
}
return 0;
}
break;
}
/*
* All other cases have no branch delay slot and are 16-bits.
* Branches do not cause an exception.
*/
regs->cp0_epc += 2;
return 0;
}
/**
 * __compute_return_epc_for_insn - Compute the return address and emulate
 *                                 the branch, if required.
 *
 * @regs: Pointer to pt_regs
 * @insn: branch instruction to decode
 * Return: -EFAULT on error (a SIGILL is also forced); on success,
 *         0 or BRANCH_LIKELY_TAKEN as appropriate after
 *         evaluating the branch.
*
* MIPS R6 Compact branches and forbidden slots:
* Compact branches do not throw exceptions because they do
* not have delay slots. The forbidden slot instruction ($PC+4)
* is only executed if the branch was not taken. Otherwise the
* forbidden slot is skipped entirely. This means that the
 * only possible reason to be here because of a MIPS R6 compact
 * branch instruction is that the forbidden slot has raised an
 * exception.
* In that case the branch was not taken, so the EPC can be safely
* set to EPC + 8.
*/
int __compute_return_epc_for_insn(struct pt_regs *regs,
union mips_instruction insn)
{
long epc = regs->cp0_epc;
unsigned int dspcontrol;
int ret = 0;
switch (insn.i_format.opcode) {
/*
* jr and jalr are in r_format format.
*/
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
regs->regs[insn.r_format.rd] = epc + 8;
fallthrough;
case jr_op:
if (NO_R6EMU && insn.r_format.func == jr_op)
goto sigill_r2r6;
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
break;
/*
* This group contains:
* bltz_op, bgez_op, bltzl_op, bgezl_op,
* bltzal_op, bgezal_op, bltzall_op, bgezall_op.
*/
case bcond_op:
switch (insn.i_format.rt) {
case bltzl_op:
if (NO_R6EMU)
goto sigill_r2r6;
fallthrough;
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bltzl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bgezl_op:
if (NO_R6EMU)
goto sigill_r2r6;
fallthrough;
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bgezl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bltzal_op:
case bltzall_op:
if (NO_R6EMU && (insn.i_format.rs ||
insn.i_format.rt == bltzall_op))
goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a NAL
* instruction or because we are emulating an
* old bltzal{,l} one. Let's figure out what the
* case really is.
*/
if (!insn.i_format.rs) {
/*
* NAL or BLTZAL with rs == 0
* Doesn't matter if we are R6 or not. The
* result is the same
*/
regs->cp0_epc += 4 +
(insn.i_format.simmediate << 2);
break;
}
/* Now do the real thing for non-R6 BLTZAL{,L} */
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bltzall_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bgezal_op:
case bgezall_op:
if (NO_R6EMU && (insn.i_format.rs ||
insn.i_format.rt == bgezall_op))
goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a BAL
* instruction or because we are emulating an
* old bgezal{,l} one. Let's figure out what the
* case really is.
*/
if (!insn.i_format.rs) {
/*
* BAL or BGEZAL with rs == 0
* Doesn't matter if we are R6 or not. The
* result is the same
*/
regs->cp0_epc += 4 +
(insn.i_format.simmediate << 2);
break;
}
/* Now do the real thing for non-R6 BGEZAL{,L} */
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bgezall_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bposge32_op:
if (!cpu_has_dsp)
goto sigill_dsp;
dspcontrol = rddsp(0x01);
if (dspcontrol >= 32) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
} else
epc += 8;
regs->cp0_epc = epc;
break;
}
break;
/*
* These are unconditional and in j_format.
*/
case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
fallthrough;
case j_op:
epc += 4;
epc >>= 28;
epc <<= 28;
epc |= (insn.j_format.target << 2);
regs->cp0_epc = epc;
if (insn.i_format.opcode == jalx_op)
set_isa16_mode(regs->cp0_epc);
break;
/*
* These are conditional and in i_format.
*/
case beql_op:
if (NO_R6EMU)
goto sigill_r2r6;
fallthrough;
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == beql_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bnel_op:
if (NO_R6EMU)
goto sigill_r2r6;
fallthrough;
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == bnel_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case blezl_op: /* not really i_format */
if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r2r6;
fallthrough;
case blez_op:
/*
* Compact branches for R6 for the
* blez and blezl opcodes.
* BLEZ | rs = 0 | rt != 0 == BLEZALC
* BLEZ | rs = rt != 0 == BGEZALC
* BLEZ | rs != 0 | rt != 0 == BGEUC
* BLEZL | rs = 0 | rt != 0 == BLEZC
* BLEZL | rs = rt != 0 == BGEZC
* BLEZL | rs != 0 | rt != 0 == BGEC
*
* For real BLEZ{,L}, rt is always 0.
*/
if (cpu_has_mips_r6 && insn.i_format.rt) {
if ((insn.i_format.opcode == blez_op) &&
((!insn.i_format.rs && insn.i_format.rt) ||
(insn.i_format.rs == insn.i_format.rt)))
regs->regs[31] = epc + 4;
regs->cp0_epc += 8;
break;
}
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == blezl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r2r6;
fallthrough;
case bgtz_op:
/*
* Compact branches for R6 for the
* bgtz and bgtzl opcodes.
* BGTZ | rs = 0 | rt != 0 == BGTZALC
* BGTZ | rs = rt != 0 == BLTZALC
* BGTZ | rs != 0 | rt != 0 == BLTUC
* BGTZL | rs = 0 | rt != 0 == BGTZC
* BGTZL | rs = rt != 0 == BLTZC
* BGTZL | rs != 0 | rt != 0 == BLTC
*
		 * *ZALC variant for BGTZ && rt != 0
		 * For real BGTZ{,L}, rt is always 0.
*/
if (cpu_has_mips_r6 && insn.i_format.rt) {
			if ((insn.i_format.opcode == bgtz_op) &&
((!insn.i_format.rs && insn.i_format.rt) ||
(insn.i_format.rs == insn.i_format.rt)))
regs->regs[31] = epc + 4;
regs->cp0_epc += 8;
break;
}
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == bgtzl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
#ifdef CONFIG_MIPS_FP_SUPPORT
/*
* And now the FPA/cp1 branch instructions.
*/
case cop1_op: {
unsigned int bit, fcr31, reg;
if (cpu_has_mips_r6 &&
((insn.i_format.rs == bc1eqz_op) ||
(insn.i_format.rs == bc1nez_op))) {
if (!init_fp_ctx(current))
lose_fpu(1);
reg = insn.i_format.rt;
			bit = get_fpr32(&current->thread.fpu.fpr[reg], 0) & 0x1;
if (insn.i_format.rs == bc1eqz_op)
bit = !bit;
own_fpu(1);
if (bit)
epc = epc + 4 +
(insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
} else {
preempt_disable();
if (is_fpu_owner())
fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
bit = (insn.i_format.rt >> 2);
bit += (bit != 0);
bit += 23;
switch (insn.i_format.rt & 3) {
case 0: /* bc1f */
case 2: /* bc1fl */
if (~fcr31 & (1 << bit)) {
epc = epc + 4 +
(insn.i_format.simmediate << 2);
if (insn.i_format.rt == 2)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case 1: /* bc1t */
case 3: /* bc1tl */
if (fcr31 & (1 << bit)) {
epc = epc + 4 +
(insn.i_format.simmediate << 2);
if (insn.i_format.rt == 3)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
}
break;
}
}
#endif /* CONFIG_MIPS_FP_SUPPORT */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case lwc2_op: /* This is bbit0 on Octeon */
if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
== 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
case ldc2_op: /* This is bbit032 on Octeon */
if ((regs->regs[insn.i_format.rs] &
(1ull<<(insn.i_format.rt+32))) == 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
case swc2_op: /* This is bbit1 on Octeon */
if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
case sdc2_op: /* This is bbit132 on Octeon */
if (regs->regs[insn.i_format.rs] &
(1ull<<(insn.i_format.rt+32)))
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
#else
case bc6_op:
/* Only valid for MIPS R6 */
if (!cpu_has_mips_r6)
goto sigill_r6;
regs->cp0_epc += 8;
break;
case balc6_op:
if (!cpu_has_mips_r6)
goto sigill_r6;
/* Compact branch: BALC */
regs->regs[31] = epc + 4;
epc += 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
case pop66_op:
if (!cpu_has_mips_r6)
goto sigill_r6;
/* Compact branch: BEQZC || JIC */
regs->cp0_epc += 8;
break;
case pop76_op:
if (!cpu_has_mips_r6)
goto sigill_r6;
/* Compact branch: BNEZC || JIALC */
if (!insn.i_format.rs) {
/* JIALC: set $31/ra */
regs->regs[31] = epc + 4;
}
regs->cp0_epc += 8;
break;
#endif
case pop10_op:
case pop30_op:
/* Only valid for MIPS R6 */
if (!cpu_has_mips_r6)
goto sigill_r6;
/*
* Compact branches:
		 * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
*/
if (insn.i_format.rt && !insn.i_format.rs)
regs->regs[31] = epc + 4;
regs->cp0_epc += 8;
break;
}
return ret;
sigill_dsp:
pr_debug("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
current->comm);
force_sig(SIGILL);
return -EFAULT;
sigill_r2r6:
pr_debug("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
current->comm);
force_sig(SIGILL);
return -EFAULT;
sigill_r6:
pr_debug("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n",
current->comm);
force_sig(SIGILL);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
int __compute_return_epc(struct pt_regs *regs)
{
unsigned int __user *addr;
long epc;
union mips_instruction insn;
epc = regs->cp0_epc;
if (epc & 3)
goto unaligned;
/*
* Read the instruction
*/
addr = (unsigned int __user *) epc;
if (__get_user(insn.word, addr)) {
force_sig(SIGSEGV);
return -EFAULT;
}
return __compute_return_epc_for_insn(regs, insn);
unaligned:
printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS);
return -EFAULT;
}
#if (defined CONFIG_KPROBES) || (defined CONFIG_UPROBES)
int __insn_is_compact_branch(union mips_instruction insn)
{
if (!cpu_has_mips_r6)
return 0;
switch (insn.i_format.opcode) {
case blezl_op:
case bgtzl_op:
case blez_op:
case bgtz_op:
/*
* blez[l] and bgtz[l] opcodes with non-zero rt
* are MIPS R6 compact branches
*/
if (insn.i_format.rt)
return 1;
break;
case bc6_op:
case balc6_op:
case pop10_op:
case pop30_op:
case pop66_op:
case pop76_op:
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(__insn_is_compact_branch);
#endif /* CONFIG_KPROBES || CONFIG_UPROBES */
| linux-master | arch/mips/kernel/branch.c |
/*
* Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
* linux/arch/mips/tx4927/common/tx4927_irq.c,
* linux/arch/mips/tx4938/common/irq.c
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* [email protected]
* [email protected]
* Copyright (C) 2000-2001 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/txx9irq.h>
struct txx9_irc_reg {
u32 cer;
u32 cr[2];
u32 unused0;
u32 ilr[8];
u32 unused1[4];
u32 imr;
u32 unused2[7];
u32 scr;
u32 unused3[7];
u32 ssr;
u32 unused4[7];
u32 csr;
};
/* IRCER : Int. Control Enable */
#define TXx9_IRCER_ICE 0x00000001
/* IRCR : Int. Control */
#define TXx9_IRCR_LOW 0x00000000
#define TXx9_IRCR_HIGH 0x00000001
#define TXx9_IRCR_DOWN 0x00000002
#define TXx9_IRCR_UP 0x00000003
#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
/* IRSCR : Int. Status Control */
#define TXx9_IRSCR_EIClrE 0x00000100
#define TXx9_IRSCR_EIClr_MASK 0x0000000f
/* IRCSR : Int. Current Status */
#define TXx9_IRCSR_IF 0x00010000
#define TXx9_IRCSR_ILV_MASK 0x00000700
#define TXx9_IRCSR_IVL_MASK 0x0000001f
#define irc_dlevel 0
#define irc_elevel 1
static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly;
static struct {
unsigned char level;
unsigned char mode;
} txx9irq[TXx9_MAX_IR] __read_mostly;
static void txx9_irq_unmask(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
	u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
__raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
| (txx9irq[irq_nr].level << ofs),
ilrp);
}
static inline void txx9_irq_mask(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
__raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
| (irc_dlevel << ofs),
ilrp);
mmiowb();
}
static void txx9_irq_mask_ack(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
txx9_irq_mask(d);
/* clear edge detection */
if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
__raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
}
static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 cr;
u32 __iomem *crp;
int ofs;
int mode;
if (flow_type & IRQF_TRIGGER_PROBE)
return 0;
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
default:
return -EINVAL;
}
crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8];
cr = __raw_readl(crp);
ofs = (irq_nr & (8 - 1)) * 2;
cr &= ~(0x3 << ofs);
cr |= (mode & 0x3) << ofs;
__raw_writel(cr, crp);
txx9irq[irq_nr].mode = mode;
return 0;
}
static struct irq_chip txx9_irq_chip = {
.name = "TXX9",
.irq_ack = txx9_irq_mask_ack,
.irq_mask = txx9_irq_mask,
.irq_mask_ack = txx9_irq_mask_ack,
.irq_unmask = txx9_irq_unmask,
.irq_set_type = txx9_irq_set_type,
};
void __init txx9_irq_init(unsigned long baseaddr)
{
int i;
txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg));
for (i = 0; i < TXx9_MAX_IR; i++) {
txx9irq[i].level = 4; /* middle level */
txx9irq[i].mode = TXx9_IRCR_LOW;
irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
handle_level_irq);
}
/* mask all IRC interrupts */
__raw_writel(0, &txx9_ircptr->imr);
for (i = 0; i < 8; i++)
__raw_writel(0, &txx9_ircptr->ilr[i]);
/* setup IRC interrupt mode (Low Active) */
for (i = 0; i < 2; i++)
__raw_writel(0, &txx9_ircptr->cr[i]);
/* enable interrupt control */
__raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer);
__raw_writel(irc_elevel, &txx9_ircptr->imr);
}
int __init txx9_irq_set_pri(int irc_irq, int new_pri)
{
int old_pri;
if ((unsigned int)irc_irq >= TXx9_MAX_IR)
return 0;
old_pri = txx9irq[irc_irq].level;
txx9irq[irc_irq].level = new_pri;
return old_pri;
}
int txx9_irq(void)
{
u32 csr = __raw_readl(&txx9_ircptr->csr);
if (likely(!(csr & TXx9_IRCSR_IF)))
return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1));
return -1;
}
| linux-master | arch/mips/kernel/irq_txx9.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/kdebug.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/uprobes.h>
#include <asm/branch.h>
#include <asm/cpu-features.h>
#include <asm/ptrace.h>
#include "probes-common.h"
static inline int insn_has_delay_slot(const union mips_instruction insn)
{
return __insn_has_delay_slot(insn);
}
/**
* arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @aup: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return: 0 on success or a negative number on error.
*/
int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
struct mm_struct *mm, unsigned long addr)
{
union mips_instruction inst;
/*
* For the time being this also blocks attempts to use uprobes with
* MIPS16 and microMIPS.
*/
if (addr & 0x03)
return -EINVAL;
inst.word = aup->insn[0];
if (__insn_is_compact_branch(inst)) {
pr_notice("Uprobes for compact branches are not supported\n");
return -EINVAL;
}
aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;	/* break: trap after the XOL step */
return 0;
}
/**
* is_trap_insn - check if the instruction is a trap variant
* @insn: instruction to be checked.
* Returns true if @insn is a trap variant.
*
 * This definition overrides the weak definition in kernel/events/uprobes.c
* and is needed for the case where an architecture has multiple trap
* instructions (like PowerPC or MIPS). We treat BREAK just like the more
* modern conditional trap instructions.
*/
bool is_trap_insn(uprobe_opcode_t *insn)
{
union mips_instruction inst;
inst.word = *insn;
switch (inst.i_format.opcode) {
case spec_op:
switch (inst.r_format.func) {
case break_op:
case teq_op:
case tge_op:
case tgeu_op:
case tlt_op:
case tltu_op:
case tne_op:
return true;
}
break;
case bcond_op: /* Yes, really ... */
switch (inst.u_format.rt) {
case teqi_op:
case tgei_op:
case tgeiu_op:
case tlti_op:
case tltiu_op:
case tnei_op:
return true;
}
break;
}
return false;
}
#define UPROBE_TRAP_NR ULONG_MAX
/*
* arch_uprobe_pre_xol - prepare to execute out of line.
* @auprobe: the probepoint information.
* @regs: reflects the saved user state of current task.
*/
int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
/*
* Now find the EPC where to resume after the breakpoint has been
* dealt with. This may require emulation of a branch.
*/
aup->resume_epc = regs->cp0_epc + 4;
if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
__compute_return_epc_for_insn(regs,
(union mips_instruction) aup->insn[0]);
aup->resume_epc = regs->cp0_epc;
}
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
regs->cp0_epc = current->utask->xol_vaddr;
return 0;
}
int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
current->thread.trap_nr = utask->autask.saved_trap_nr;
regs->cp0_epc = aup->resume_epc;
return 0;
}
/*
 * If the XOL insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc.), then detect the case where a single-stepped
* instruction jumps back to its own address. It is assumed that anything
* like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
*
* arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
* arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR (== ULONG_MAX) set by arch_uprobe_pre_xol().
*/
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
return true;
return false;
}
int arch_uprobe_exception_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = data;
struct pt_regs *regs = args->regs;
/* regs == NULL is a kernel bug */
if (WARN_ON(!regs))
return NOTIFY_DONE;
/* We are only interested in userspace traps */
if (!user_mode(regs))
return NOTIFY_DONE;
switch (val) {
case DIE_UPROBE:
if (uprobe_pre_sstep_notifier(regs))
return NOTIFY_STOP;
break;
case DIE_UPROBE_XOL:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
break;
default:
break;
}
return 0;
}
/*
* This function gets called when XOL instruction either gets trapped or
* the thread has a fatal signal. Reset the instruction pointer to its
* probed address for the potential restart or for post mortem analysis.
*/
void arch_uprobe_abort_xol(struct arch_uprobe *aup,
struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
}
unsigned long arch_uretprobe_hijack_return_addr(
unsigned long trampoline_vaddr, struct pt_regs *regs)
{
unsigned long ra;
ra = regs->regs[31];
/* Replace the return address with the trampoline address */
regs->regs[31] = trampoline_vaddr;
return ra;
}
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
unsigned long kaddr, kstart;
/* Initialize the slot */
kaddr = (unsigned long)kmap_atomic(page);
kstart = kaddr + (vaddr & ~PAGE_MASK);
memcpy((void *)kstart, src, len);
flush_icache_range(kstart, kstart + len);
kunmap_atomic((void *)kaddr);
}
/**
* uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
* @regs: Reflects the saved state of the task after it has hit a breakpoint
* instruction.
* Return the address of the breakpoint instruction.
*
* This overrides the weak version in kernel/events/uprobes.c.
*/
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
}
/*
* See if the instruction can be emulated.
* Returns true if instruction was emulated, false otherwise.
*
 * For now we never emulate, so this function just returns false.
*/
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
return false;
}
| linux-master | arch/mips/kernel/uprobes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000, 2001 Broadcom Corporation
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_scd.h>
#define IMR_IP2_VAL K_INT_MAP_I0
#define IMR_IP3_VAL K_INT_MAP_I1
#define IMR_IP4_VAL K_INT_MAP_I2
/*
 * The general purpose timer ticks at 1 MHz independently of
 * the rest of the system.
*/
static int sibyte_shutdown(struct clock_event_device *evt)
{
void __iomem *cfg;
cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
/* Stop the timer until we actually program a shot */
__raw_writeq(0, cfg);
return 0;
}
static int sibyte_set_periodic(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
__raw_writeq(0, cfg);
__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
return 0;
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
__raw_writeq(0, cfg);
__raw_writeq(delta - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE, cfg);
return 0;
}
static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = dev_id;
void __iomem *cfg;
unsigned long tmode;
if (clockevent_state_periodic(cd))
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
/* ACK interrupt */
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
____raw_writeq(tmode, cfg);
cd->event_handler(cd);
return IRQ_HANDLED;
}
static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
void sb1250_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_INT_TIMER_0 + cpu;
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
	/* There are only 4 general purpose timers, and the last one is the hpt */
BUG_ON(cpu > 2);
sprintf(name, "sb1250-counter-%d", cpu);
cd->name = name;
cd->features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
cd->max_delta_ticks = 0x7fffff;
cd->min_delta_ns = clockevent_delta2ns(2, cd);
cd->min_delta_ticks = 2;
cd->rating = 200;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
cd->set_state_shutdown = sibyte_shutdown;
cd->set_state_periodic = sibyte_set_periodic;
cd->set_state_oneshot = sibyte_shutdown;
clockevents_register_device(cd);
sb1250_mask_irq(cpu, irq);
/*
* Map the timer interrupt to IP[4] of this cpu
*/
__raw_writeq(IMR_IP4_VAL,
IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
(irq << 3)));
sb1250_unmask_irq(cpu, irq);
irq_set_affinity(irq, cpumask_of(cpu));
if (request_irq(irq, sibyte_counter_handler, flags, name, cd))
pr_err("Failed to request irq %d (%s)\n", irq, name);
}
| linux-master | arch/mips/kernel/cevt-sb1250.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MIPS support for CONFIG_OF device tree support
*
* Copyright (C) 2010 Cisco Systems Inc. <[email protected]>
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/prom.h>
static char mips_machine_name[64] = "Unknown";
__init void mips_set_machine_name(const char *name)
{
if (name == NULL)
return;
strscpy(mips_machine_name, name, sizeof(mips_machine_name));
pr_info("MIPS: machine is %s\n", mips_get_machine_name());
}
char *mips_get_machine_name(void)
{
return mips_machine_name;
}
#ifdef CONFIG_USE_OF
void __init __dt_setup_arch(void *bph)
{
if (!early_init_dt_scan(bph))
return;
mips_set_machine_name(of_flat_dt_get_machine_name());
}
int __init __dt_register_buses(const char *bus0, const char *bus1)
{
static struct of_device_id of_ids[3];
if (!of_have_populated_dt())
panic("device tree not present");
strscpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
if (bus1) {
strscpy(of_ids[1].compatible, bus1,
sizeof(of_ids[1].compatible));
}
if (of_platform_populate(NULL, of_ids, NULL, NULL))
panic("failed to populate DT");
return 0;
}
void __weak __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
}
#endif
| linux-master | arch/mips/kernel/prom.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/mipsregs.h>
static void build_segment_config(char *str, unsigned int cfg)
{
unsigned int am;
static const char * const am_str[] = {
"UK", "MK", "MSK", "MUSK", "MUSUK", "USK",
"RSRVD", "UUSK"};
/* Segment access mode. */
am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
str += sprintf(str, "%-5s", am_str[am]);
/*
* Access modes MK, MSK and MUSK are mapped segments. Therefore
* there is no direct physical address mapping unless it becomes
* unmapped uncached at error level due to EU.
*/
if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU))
str += sprintf(str, " %03lx",
((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
else
str += sprintf(str, " UND");
if ((am == 0) || (am > 3))
str += sprintf(str, " %01ld",
((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
else
str += sprintf(str, " U");
/* Exception configuration. */
str += sprintf(str, " %01ld\n",
((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
}
static int segments_show(struct seq_file *m, void *v)
{
unsigned int segcfg;
char str[42];
seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n");
seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n");
segcfg = read_c0_segctl0();
build_segment_config(str, segcfg);
seq_printf(m, " 0 e0000000 512M %s", str);
segcfg >>= 16;
build_segment_config(str, segcfg);
seq_printf(m, " 1 c0000000 512M %s", str);
segcfg = read_c0_segctl1();
build_segment_config(str, segcfg);
seq_printf(m, " 2 a0000000 512M %s", str);
segcfg >>= 16;
build_segment_config(str, segcfg);
seq_printf(m, " 3 80000000 512M %s", str);
segcfg = read_c0_segctl2();
build_segment_config(str, segcfg);
seq_printf(m, " 4 40000000 1G %s", str);
segcfg >>= 16;
build_segment_config(str, segcfg);
seq_printf(m, " 5 00000000 1G %s\n", str);
return 0;
}
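/*
* Illustrative debugfs output (hypothetical register values; the access
* mode, physical address and caching columns depend entirely on how the
* bootloader programmed SegCtl0..2):
*
*	Segment Virtual    Size   Access Mode   Physical Caching EU
*	------- -------    ----   -----------   -------- ------- --
*	   0    e0000000   512M   MK               UND     U     0
*	   2    a0000000   512M   UK               000     2     0
*/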
DEFINE_SHOW_ATTRIBUTE(segments);
static int __init segments_info(void)
{
if (cpu_has_segments)
debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL,
&segments_fops);
return 0;
}
device_initcall(segments_info);
| linux-master | arch/mips/kernel/segment.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Stack trace management functions
*
* Copyright (C) 2006 Atsushi Nemoto <[email protected]>
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <asm/stacktrace.h>
/*
* Save stack-backtrace addresses into a stack_trace buffer:
*/
static void save_raw_context_stack(struct stack_trace *trace,
unsigned long reg29, int savesched)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr) &&
(savesched || !in_sched_functions(addr))) {
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = addr;
if (trace->nr_entries >= trace->max_entries)
break;
}
}
}
static void save_context_stack(struct stack_trace *trace,
struct task_struct *tsk, struct pt_regs *regs, int savesched)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(tsk);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_context_stack(trace, sp, savesched);
return;
}
do {
if (savesched || !in_sched_functions(pc)) {
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = pc;
if (trace->nr_entries >= trace->max_entries)
break;
}
pc = unwind_stack(tsk, &sp, pc, &ra);
} while (pc);
#else
save_raw_context_stack(trace, sp, savesched);
#endif
}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
struct pt_regs dummyregs;
struct pt_regs *regs = &dummyregs;
WARN_ON(trace->nr_entries || !trace->max_entries);
if (tsk != current) {
regs->regs[29] = tsk->thread.reg29;
regs->regs[31] = 0;
regs->cp0_epc = tsk->thread.reg31;
} else
prepare_frametrace(regs);
save_context_stack(trace, tsk, regs, tsk == current);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
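/*
* Illustrative usage sketch of this legacy stacktrace API (hypothetical
* caller, not part of this file): the caller owns the entries buffer
* and the skip/max limits.
*
*	unsigned long entries[16];
*	struct stack_trace trace = {
*		.entries	= entries,
*		.max_entries	= ARRAY_SIZE(entries),
*		.skip		= 2,
*	};
*
*	save_stack_trace(&trace);
*
* .skip = 2 drops the two innermost frames; afterwards the captured
* return addresses sit in entries[0..trace.nr_entries-1].
*/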
| linux-master | arch/mips/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1995, 1996, 2001 Ralf Baechle
* Copyright (C) 2001, 2004 MIPS Technologies, Inc.
* Copyright (C) 2004 Maciej W. Rozycki
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/prom.h>
unsigned int vced_count, vcei_count;
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
}
int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
}
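/*
* Illustrative sketch (hypothetical platform code, not part of this
* file): a platform can append its own lines to /proc/cpuinfo by
* registering a notifier on this chain.
*
*	static int foo_cpuinfo(struct notifier_block *nb,
*			       unsigned long val, void *data)
*	{
*		struct proc_cpuinfo_notifier_args *args = data;
*
*		seq_printf(args->m, "board vendor\t\t: Foo Inc.\n");
*		return NOTIFY_OK;
*	}
*	static struct notifier_block foo_nb = {
*		.notifier_call = foo_cpuinfo,
*	};
*
*	register_proc_cpuinfo_notifier(&foo_nb);
*
* show_cpuinfo() below invokes the chain once per CPU, with args->n
* holding the CPU number.
*/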
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
unsigned long n = (unsigned long) v - 1;
unsigned int version = cpu_data[n].processor_id;
unsigned int fp_vers = cpu_data[n].fpu_id;
char fmt[64];
int i;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
#endif
/*
* For the first processor also print the system type
*/
if (n == 0) {
seq_printf(m, "system type\t\t: %s\n", get_system_type());
if (mips_get_machine_name())
seq_printf(m, "machine\t\t\t: %s\n",
mips_get_machine_name());
}
seq_printf(m, "processor\t\t: %ld\n", n);
sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
seq_printf(m, fmt, __cpu_name[n],
(version >> 4) & 0x0f, version & 0x0f,
(fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
cpu_data[n].udelay_val / (500000/HZ),
(cpu_data[n].udelay_val / (5000/HZ)) % 100);
seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
seq_printf(m, "microsecond timers\t: %s\n",
cpu_has_counter ? "yes" : "no");
seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "extra interrupt vector\t: %s\n",
cpu_has_divec ? "yes" : "no");
seq_printf(m, "hardware watchpoint\t: %s",
cpu_has_watch ? "yes, " : "no\n");
if (cpu_has_watch) {
seq_printf(m, "count: %d, address/irw mask: [",
cpu_data[n].watch_reg_count);
for (i = 0; i < cpu_data[n].watch_reg_count; i++)
seq_printf(m, "%s0x%04x", i ? ", " : "",
cpu_data[n].watch_reg_masks[i]);
seq_puts(m, "]\n");
}
seq_puts(m, "isa\t\t\t:");
if (cpu_has_mips_1)
seq_puts(m, " mips1");
if (cpu_has_mips_2)
seq_puts(m, " mips2");
if (cpu_has_mips_3)
seq_puts(m, " mips3");
if (cpu_has_mips_4)
seq_puts(m, " mips4");
if (cpu_has_mips_5)
seq_puts(m, " mips5");
if (cpu_has_mips32r1)
seq_puts(m, " mips32r1");
if (cpu_has_mips32r2)
seq_puts(m, " mips32r2");
if (cpu_has_mips32r5)
seq_puts(m, " mips32r5");
if (cpu_has_mips32r6)
seq_puts(m, " mips32r6");
if (cpu_has_mips64r1)
seq_puts(m, " mips64r1");
if (cpu_has_mips64r2)
seq_puts(m, " mips64r2");
if (cpu_has_mips64r5)
seq_puts(m, " mips64r5");
if (cpu_has_mips64r6)
seq_puts(m, " mips64r6");
seq_puts(m, "\n");
seq_puts(m, "ASEs implemented\t:");
if (cpu_has_mips16)
seq_puts(m, " mips16");
if (cpu_has_mips16e2)
seq_puts(m, " mips16e2");
if (cpu_has_mdmx)
seq_puts(m, " mdmx");
if (cpu_has_mips3d)
seq_puts(m, " mips3d");
if (cpu_has_smartmips)
seq_puts(m, " smartmips");
if (cpu_has_dsp)
seq_puts(m, " dsp");
if (cpu_has_dsp2)
seq_puts(m, " dsp2");
if (cpu_has_dsp3)
seq_puts(m, " dsp3");
if (cpu_has_mipsmt)
seq_puts(m, " mt");
if (cpu_has_mmips)
seq_puts(m, " micromips");
if (cpu_has_vz)
seq_puts(m, " vz");
if (cpu_has_msa)
seq_puts(m, " msa");
if (cpu_has_eva)
seq_puts(m, " eva");
if (cpu_has_htw)
seq_puts(m, " htw");
if (cpu_has_xpa)
seq_puts(m, " xpa");
if (cpu_has_loongson_mmi)
seq_puts(m, " loongson-mmi");
if (cpu_has_loongson_cam)
seq_puts(m, " loongson-cam");
if (cpu_has_loongson_ext)
seq_puts(m, " loongson-ext");
if (cpu_has_loongson_ext2)
seq_puts(m, " loongson-ext2");
seq_puts(m, "\n");
if (cpu_has_mmips) {
seq_printf(m, "micromips kernel\t: %s\n",
(read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
}
seq_puts(m, "Options implemented\t:");
if (cpu_has_tlb)
seq_puts(m, " tlb");
if (cpu_has_ftlb)
seq_puts(m, " ftlb");
if (cpu_has_tlbinv)
seq_puts(m, " tlbinv");
if (cpu_has_segments)
seq_puts(m, " segments");
if (cpu_has_rixiex)
seq_puts(m, " rixiex");
if (cpu_has_ldpte)
seq_puts(m, " ldpte");
if (cpu_has_maar)
seq_puts(m, " maar");
if (cpu_has_rw_llb)
seq_puts(m, " rw_llb");
if (cpu_has_4kex)
seq_puts(m, " 4kex");
if (cpu_has_3k_cache)
seq_puts(m, " 3k_cache");
if (cpu_has_4k_cache)
seq_puts(m, " 4k_cache");
if (cpu_has_octeon_cache)
seq_puts(m, " octeon_cache");
if (raw_cpu_has_fpu)
seq_puts(m, " fpu");
if (cpu_has_32fpr)
seq_puts(m, " 32fpr");
if (cpu_has_cache_cdex_p)
seq_puts(m, " cache_cdex_p");
if (cpu_has_cache_cdex_s)
seq_puts(m, " cache_cdex_s");
if (cpu_has_prefetch)
seq_puts(m, " prefetch");
if (cpu_has_mcheck)
seq_puts(m, " mcheck");
if (cpu_has_ejtag)
seq_puts(m, " ejtag");
if (cpu_has_llsc)
seq_puts(m, " llsc");
if (cpu_has_guestctl0ext)
seq_puts(m, " guestctl0ext");
if (cpu_has_guestctl1)
seq_puts(m, " guestctl1");
if (cpu_has_guestctl2)
seq_puts(m, " guestctl2");
if (cpu_has_guestid)
seq_puts(m, " guestid");
if (cpu_has_drg)
seq_puts(m, " drg");
if (cpu_has_rixi)
seq_puts(m, " rixi");
if (cpu_has_lpa)
seq_puts(m, " lpa");
if (cpu_has_mvh)
seq_puts(m, " mvh");
if (cpu_has_vtag_icache)
seq_puts(m, " vtag_icache");
if (cpu_has_dc_aliases)
seq_puts(m, " dc_aliases");
if (cpu_has_ic_fills_f_dc)
seq_puts(m, " ic_fills_f_dc");
if (cpu_has_pindexed_dcache)
seq_puts(m, " pindexed_dcache");
if (cpu_has_userlocal)
seq_puts(m, " userlocal");
if (cpu_has_nofpuex)
seq_puts(m, " nofpuex");
if (cpu_has_vint)
seq_puts(m, " vint");
if (cpu_has_veic)
seq_puts(m, " veic");
if (cpu_has_inclusive_pcaches)
seq_puts(m, " inclusive_pcaches");
if (cpu_has_perf_cntr_intr_bit)
seq_puts(m, " perf_cntr_intr_bit");
if (cpu_has_ufr)
seq_puts(m, " ufr");
if (cpu_has_fre)
seq_puts(m, " fre");
if (cpu_has_cdmm)
seq_puts(m, " cdmm");
if (cpu_has_small_pages)
seq_puts(m, " small_pages");
if (cpu_has_nan_legacy)
seq_puts(m, " nan_legacy");
if (cpu_has_nan_2008)
seq_puts(m, " nan_2008");
if (cpu_has_ebase_wg)
seq_puts(m, " ebase_wg");
if (cpu_has_badinstr)
seq_puts(m, " badinstr");
if (cpu_has_badinstrp)
seq_puts(m, " badinstrp");
if (cpu_has_contextconfig)
seq_puts(m, " contextconfig");
if (cpu_has_perf)
seq_puts(m, " perf");
if (cpu_has_mac2008_only)
seq_puts(m, " mac2008_only");
if (cpu_has_ftlbparex)
seq_puts(m, " ftlbparex");
if (cpu_has_gsexcex)
seq_puts(m, " gsexcex");
if (cpu_has_shared_ftlb_ram)
seq_puts(m, " shared_ftlb_ram");
if (cpu_has_shared_ftlb_entries)
seq_puts(m, " shared_ftlb_entries");
if (cpu_has_mipsmt_pertccounters)
seq_puts(m, " mipsmt_pertccounters");
if (cpu_has_mmid)
seq_puts(m, " mmid");
if (cpu_has_mm_sysad)
seq_puts(m, " mm_sysad");
if (cpu_has_mm_full)
seq_puts(m, " mm_full");
seq_puts(m, "\n");
seq_printf(m, "shadow register sets\t: %d\n",
cpu_data[n].srsets);
seq_printf(m, "kscratch registers\t: %d\n",
hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n]));
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
if (cpu_has_mipsmt)
seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
else if (cpu_has_vp)
seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
#endif
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
seq_printf(m, fmt, 'I', vcei_count);
proc_cpuinfo_notifier_args.m = m;
proc_cpuinfo_notifier_args.n = n;
raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
&proc_cpuinfo_notifier_args);
seq_puts(m, "\n");
return 0;
}
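/*
* Illustrative /proc/cpuinfo excerpt (hypothetical 74Kc board; the
* exact fields and values depend on the CPU, kernel config and any
* registered notifiers):
*
*	system type		: Example Board
*	processor		: 0
*	cpu model		: MIPS 74Kc V4.12 FPU V0.0
*	BogoMIPS		: 546.81
*	tlb_entries		: 32
*	isa			: mips1 mips2 mips32r1 mips32r2
*/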
static void *c_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
return i < nr_cpu_ids ? (void *) (i + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
| linux-master | arch/mips/kernel/proc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2000, 2001 Broadcom Corporation
*/
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_scd.h>
#define SB1250_HPT_NUM 3
#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */
/*
* The HPT is free-running from SB1250_HPT_VALUE down to 0, then starts
* over again.
*/
static inline u64 sb1250_hpt_get_cycles(void)
{
unsigned int count;
void __iomem *addr;
addr = IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT));
count = G_SCD_TIMER_CNT(__raw_readq(addr));
return SB1250_HPT_VALUE - count;
}
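/*
* Worked example (assuming M_SCD_TIMER_CNT is the 23-bit mask 0x7fffff,
* which matches the CLOCKSOURCE_MASK(23) used below): if the hardware
* down-counter currently reads 0x7ffff0, then
*
*	cycles = 0x7fffff - 0x7ffff0 = 0xf
*
* i.e. 15 cycles have elapsed since the last wrap, yielding the
* monotonically increasing count the clocksource core expects.
*/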
static u64 sb1250_hpt_read(struct clocksource *cs)
{
return sb1250_hpt_get_cycles();
}
struct clocksource bcm1250_clocksource = {
.name = "bcm1250-counter-3",
.rating = 200,
.read = sb1250_hpt_read,
.mask = CLOCKSOURCE_MASK(23),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static u64 notrace sb1250_read_sched_clock(void)
{
return sb1250_hpt_get_cycles();
}
void __init sb1250_clocksource_init(void)
{
struct clocksource *cs = &bcm1250_clocksource;
/* Setup hpt using timer #3 but do not enable irq for it */
__raw_writeq(0,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
R_SCD_TIMER_CFG)));
__raw_writeq(SB1250_HPT_VALUE,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
R_SCD_TIMER_INIT)));
__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
R_SCD_TIMER_CFG)));
clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
sched_clock_register(sb1250_read_sched_clock, 23, V_SCD_TIMER_FREQ);
}
| linux-master | arch/mips/kernel/csrc-sb1250.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Processor capabilities determination functions.
*
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
* Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/msa.h>
#include <asm/watch.h>
#include <asm/elf.h>
#include <asm/pgtable-bits.h>
#include <asm/spram.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include "fpu-probe.h"
#include <asm/mach-loongson64/cpucfg-emul.h>
/* Hardware capabilities */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
static inline unsigned long cpu_get_msa_id(void)
{
unsigned long status, msa_id;
status = read_c0_status();
__enable_fpu(FPU_64BIT);
enable_msa();
msa_id = read_msa_ir();
disable_msa();
write_c0_status(status);
return msa_id;
}
static int mips_dsp_disabled;
static int __init dsp_disable(char *s)
{
cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
mips_dsp_disabled = 1;
return 1;
}
__setup("nodsp", dsp_disable);
static int mips_htw_disabled;
static int __init htw_disable(char *s)
{
mips_htw_disabled = 1;
cpu_data[0].options &= ~MIPS_CPU_HTW;
write_c0_pwctl(read_c0_pwctl() &
~(1 << MIPS_PWCTL_PWEN_SHIFT));
return 1;
}
__setup("nohtw", htw_disable);
static int mips_ftlb_disabled;
static int mips_has_ftlb_configured;
enum ftlb_flags {
FTLB_EN = 1 << 0,
FTLB_SET_PROB = 1 << 1,
};
static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
static int __init ftlb_disable(char *s)
{
unsigned int config4, mmuextdef;
/*
* If the core hasn't done any FTLB configuration, there is nothing
* for us to do here.
*/
if (!mips_has_ftlb_configured)
return 1;
/* Disable it in the boot cpu */
if (set_ftlb_enable(&cpu_data[0], 0)) {
pr_warn("Can't turn FTLB off\n");
return 1;
}
config4 = read_c0_config4();
/* Check that FTLB has been disabled */
mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
/* MMUSIZEEXT == VTLB ON, FTLB OFF */
if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) {
/* This should never happen */
pr_warn("FTLB could not be disabled!\n");
return 1;
}
mips_ftlb_disabled = 1;
mips_has_ftlb_configured = 0;
/*
* noftlb is mainly used for debug purposes so print
* an informative message instead of using pr_debug()
*/
pr_info("FTLB has been disabled\n");
/*
* Some of these bits are duplicated in decode_config4().
* MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case
* once FTLB has been disabled so undo what decode_config4 did.
*/
cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways *
cpu_data[0].tlbsizeftlbsets;
cpu_data[0].tlbsizeftlbsets = 0;
cpu_data[0].tlbsizeftlbways = 0;
return 1;
}
__setup("noftlb", ftlb_disable);
/*
* Check if the CPU has per tc perf counters
*/
static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c)
{
if (read_c0_config7() & MTI_CONF7_PTC)
c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS;
}
static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
switch (current_cpu_type()) {
case CPU_34K:
/*
* Erratum "RPS May Cause Incorrect Instruction Execution"
* This code only handles VPE0, any SMP/RTOS code
* making use of VPE1 will be responsible for that VPE.
*/
if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
break;
default:
break;
}
}
void __init check_bugs32(void)
{
check_errata();
}
/*
* Probe whether the CPU has a Config register by toggling the
* alternate cache bit and seeing whether it matters.
* It's used by cpu_probe to distinguish between R3000A and R3081.
*/
static inline int cpu_has_confreg(void)
{
#ifdef CONFIG_CPU_R3000
extern unsigned long r3k_cache_size(unsigned long);
unsigned long size1, size2;
unsigned long cfg = read_c0_conf();
size1 = r3k_cache_size(ST0_ISC);
write_c0_conf(cfg ^ R30XX_CONF_AC);
size2 = r3k_cache_size(ST0_ISC);
write_c0_conf(cfg);
return size1 != size2;
#else
return 0;
#endif
}
static inline void set_elf_platform(int cpu, const char *plat)
{
if (cpu == 0)
__elf_platform = plat;
}
static inline void set_elf_base_platform(const char *plat)
{
if (__elf_base_platform == NULL) {
__elf_base_platform = plat;
}
}
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
#ifdef __NEED_VMBITS_PROBE
write_c0_entryhi(0x3fffffffffffe000ULL);
back_to_back_c0_hazard();
c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
#endif
}
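/*
* Worked example (hypothetical core): the write above sets every
* architecturally defined EntryHi VPN2 bit; bits the core does not
* implement read back as zero. On a core implementing 48 virtual
* address bits the read-back would be 0x0000ffffffffe000, and
* fls64() of that value yields vmbits = 48.
*/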
static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
{
switch (isa) {
case MIPS_CPU_ISA_M64R5:
c->isa_level |= MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5;
set_elf_base_platform("mips64r5");
fallthrough;
case MIPS_CPU_ISA_M64R2:
c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
set_elf_base_platform("mips64r2");
fallthrough;
case MIPS_CPU_ISA_M64R1:
c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
set_elf_base_platform("mips64");
fallthrough;
case MIPS_CPU_ISA_V:
c->isa_level |= MIPS_CPU_ISA_V;
set_elf_base_platform("mips5");
fallthrough;
case MIPS_CPU_ISA_IV:
c->isa_level |= MIPS_CPU_ISA_IV;
set_elf_base_platform("mips4");
fallthrough;
case MIPS_CPU_ISA_III:
c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
set_elf_base_platform("mips3");
break;
/* R6 incompatible with everything else */
case MIPS_CPU_ISA_M64R6:
c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
set_elf_base_platform("mips64r6");
fallthrough;
case MIPS_CPU_ISA_M32R6:
c->isa_level |= MIPS_CPU_ISA_M32R6;
set_elf_base_platform("mips32r6");
/* Break here so we don't add incompatible ISAs */
break;
case MIPS_CPU_ISA_M32R5:
c->isa_level |= MIPS_CPU_ISA_M32R5;
set_elf_base_platform("mips32r5");
fallthrough;
case MIPS_CPU_ISA_M32R2:
c->isa_level |= MIPS_CPU_ISA_M32R2;
set_elf_base_platform("mips32r2");
fallthrough;
case MIPS_CPU_ISA_M32R1:
c->isa_level |= MIPS_CPU_ISA_M32R1;
set_elf_base_platform("mips32");
fallthrough;
case MIPS_CPU_ISA_II:
c->isa_level |= MIPS_CPU_ISA_II;
set_elf_base_platform("mips2");
break;
}
}
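/*
* Worked example of the fallthrough cascade above (hypothetical call):
* set_isa(c, MIPS_CPU_ISA_M64R2) ORs in M32R2|M64R2 and then falls
* through the M64R1, V, IV and III cases, so isa_level ends up
* advertising every compatible lower ISA as well. Since
* set_elf_base_platform() only takes effect on its first call, the
* base platform sticks at "mips64r2".
*/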
static char unknown_isa[] = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
{
unsigned int probability = c->tlbsize / c->tlbsizevtlb;
/*
* 0 = All TLBWR instructions go to FTLB
* 1 = 15:1: For every 16 TLBWR instructions, 15 go to the
* FTLB and 1 goes to the VTLB.
* 2 = 7:1: As above with 7:1 ratio.
* 3 = 3:1: As above with 3:1 ratio.
*
* Use the linear midpoint as the probability threshold.
*/
if (probability >= 12)
return 1;
else if (probability >= 6)
return 2;
else
/*
* So the FTLB is less than 4 times the size of the VTLB;
* a 3:1 ratio can still be useful though.
*/
return 3;
}
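/*
* Worked example (hypothetical core): with a 64-entry VTLB and a
* 512-entry FTLB, tlbsize = 576 and probability = 576 / 64 = 9. That
* lands between the 6 and 12 thresholds, so the function returns 2,
* i.e. a 7:1 FTLB:VTLB write ratio.
*/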
static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
{
unsigned int config;
/* It's implementation dependent how the FTLB can be enabled */
switch (c->cputype) {
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_P6600:
/* proAptiv & related cores use Config6 to enable the FTLB */
config = read_c0_config6();
if (flags & FTLB_EN)
config |= MTI_CONF6_FTLBEN;
else
config &= ~MTI_CONF6_FTLBEN;
if (flags & FTLB_SET_PROB) {
config &= ~(3 << MTI_CONF6_FTLBP_SHIFT);
config |= calculate_ftlb_probability(c)
<< MTI_CONF6_FTLBP_SHIFT;
}
write_c0_config6(config);
back_to_back_c0_hazard();
break;
case CPU_I6400:
case CPU_I6500:
/* There's no way to disable the FTLB */
if (!(flags & FTLB_EN))
return 1;
return 0;
case CPU_LOONGSON64:
/* Flush ITLB, DTLB, VTLB and FTLB */
write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
/* Loongson-3 cores use Config6 to enable the FTLB */
config = read_c0_config6();
if (flags & FTLB_EN)
/* Enable FTLB */
write_c0_config6(config & ~LOONGSON_CONF6_FTLBDIS);
else
/* Disable FTLB */
write_c0_config6(config | LOONGSON_CONF6_FTLBDIS);
break;
default:
return 1;
}
return 0;
}
static int mm_config(struct cpuinfo_mips *c)
{
unsigned int config0, update, mm;
config0 = read_c0_config();
mm = config0 & MIPS_CONF_MM;
/*
* It's implementation dependent what type of write-merge is supported
* and whether it can be enabled/disabled. If it is settable, let's
* allow merging by default. Some platforms might not support
* write-through caching; in that case just ignore the
* CP0.Config.MM bit field value.
*/
switch (c->cputype) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_P5600:
case CPU_P6600:
c->options |= MIPS_CPU_MM_FULL;
update = MIPS_CONF_MM_FULL;
break;
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
mm = 0;
fallthrough;
default:
update = 0;
break;
}
if (update) {
config0 = (config0 & ~MIPS_CONF_MM) | update;
write_c0_config(config0);
} else if (mm == MIPS_CONF_MM_SYSAD) {
c->options |= MIPS_CPU_MM_SYSAD;
} else if (mm == MIPS_CONF_MM_FULL) {
c->options |= MIPS_CPU_MM_FULL;
}
return 0;
}
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
{
unsigned int config0;
int isa, mt;
config0 = read_c0_config();
/*
* Look for Standard TLB or Dual VTLB and FTLB
*/
mt = config0 & MIPS_CONF_MT;
if (mt == MIPS_CONF_MT_TLB)
c->options |= MIPS_CPU_TLB;
else if (mt == MIPS_CONF_MT_FTLB)
c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
isa = (config0 & MIPS_CONF_AT) >> 13;
switch (isa) {
case 0:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
set_isa(c, MIPS_CPU_ISA_M32R1);
break;
case 1:
set_isa(c, MIPS_CPU_ISA_M32R2);
break;
case 2:
set_isa(c, MIPS_CPU_ISA_M32R6);
break;
default:
goto unknown;
}
break;
case 2:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
set_isa(c, MIPS_CPU_ISA_M64R1);
break;
case 1:
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
case 2:
set_isa(c, MIPS_CPU_ISA_M64R6);
break;
default:
goto unknown;
}
break;
default:
goto unknown;
}
return config0 & MIPS_CONF_M;
unknown:
panic(unknown_isa, config0);
}
static inline unsigned int decode_config1(struct cpuinfo_mips *c)
{
unsigned int config1;
config1 = read_c0_config1();
if (config1 & MIPS_CONF1_MD)
c->ases |= MIPS_ASE_MDMX;
if (config1 & MIPS_CONF1_PC)
c->options |= MIPS_CPU_PERF;
if (config1 & MIPS_CONF1_WR)
c->options |= MIPS_CPU_WATCH;
if (config1 & MIPS_CONF1_CA)
c->ases |= MIPS_ASE_MIPS16;
if (config1 & MIPS_CONF1_EP)
c->options |= MIPS_CPU_EJTAG;
if (config1 & MIPS_CONF1_FP) {
c->options |= MIPS_CPU_FPU;
c->options |= MIPS_CPU_32FPR;
}
if (cpu_has_tlb) {
c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
c->tlbsizevtlb = c->tlbsize;
c->tlbsizeftlbsets = 0;
}
return config1 & MIPS_CONF_M;
}
static inline unsigned int decode_config2(struct cpuinfo_mips *c)
{
unsigned int config2;
config2 = read_c0_config2();
if (config2 & MIPS_CONF2_SL)
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
return config2 & MIPS_CONF_M;
}
static inline unsigned int decode_config3(struct cpuinfo_mips *c)
{
unsigned int config3;
config3 = read_c0_config3();
if (config3 & MIPS_CONF3_SM) {
c->ases |= MIPS_ASE_SMARTMIPS;
c->options |= MIPS_CPU_RIXI | MIPS_CPU_CTXTC;
}
if (config3 & MIPS_CONF3_RXI)
c->options |= MIPS_CPU_RIXI;
if (config3 & MIPS_CONF3_CTXTC)
c->options |= MIPS_CPU_CTXTC;
if (config3 & MIPS_CONF3_DSP)
c->ases |= MIPS_ASE_DSP;
if (config3 & MIPS_CONF3_DSP2P) {
c->ases |= MIPS_ASE_DSP2P;
if (cpu_has_mips_r6)
c->ases |= MIPS_ASE_DSP3;
}
if (config3 & MIPS_CONF3_VINT)
c->options |= MIPS_CPU_VINT;
if (config3 & MIPS_CONF3_VEIC)
c->options |= MIPS_CPU_VEIC;
if (config3 & MIPS_CONF3_LPA)
c->options |= MIPS_CPU_LPA;
if (config3 & MIPS_CONF3_MT)
c->ases |= MIPS_ASE_MIPSMT;
if (config3 & MIPS_CONF3_ULRI)
c->options |= MIPS_CPU_ULRI;
if (config3 & MIPS_CONF3_ISA)
c->options |= MIPS_CPU_MICROMIPS;
if (config3 & MIPS_CONF3_VZ)
c->ases |= MIPS_ASE_VZ;
if (config3 & MIPS_CONF3_SC)
c->options |= MIPS_CPU_SEGMENTS;
if (config3 & MIPS_CONF3_BI)
c->options |= MIPS_CPU_BADINSTR;
if (config3 & MIPS_CONF3_BP)
c->options |= MIPS_CPU_BADINSTRP;
if (config3 & MIPS_CONF3_MSA)
c->ases |= MIPS_ASE_MSA;
if (config3 & MIPS_CONF3_PW) {
c->htw_seq = 0;
c->options |= MIPS_CPU_HTW;
}
if (config3 & MIPS_CONF3_CDMM)
c->options |= MIPS_CPU_CDMM;
if (config3 & MIPS_CONF3_SP)
c->options |= MIPS_CPU_SP;
return config3 & MIPS_CONF_M;
}
static inline unsigned int decode_config4(struct cpuinfo_mips *c)
{
unsigned int config4;
unsigned int newcf4;
unsigned int mmuextdef;
unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
unsigned long asid_mask;
config4 = read_c0_config4();
if (cpu_has_tlb) {
if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
c->options |= MIPS_CPU_TLBINV;
/*
* R6 has dropped the MMUExtDef field from config4.
* On R6 the fields always describe the FTLB, and only if it is
* present according to Config.MT.
*/
if (!cpu_has_mips_r6)
mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
else if (cpu_has_ftlb)
mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
else
mmuextdef = 0;
switch (mmuextdef) {
case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
c->tlbsizevtlb = c->tlbsize;
break;
case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
c->tlbsizevtlb +=
((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40;
c->tlbsize = c->tlbsizevtlb;
ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
fallthrough;
case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
if (mips_ftlb_disabled)
break;
newcf4 = (config4 & ~ftlb_page) |
(page_size_ftlb(mmuextdef) <<
MIPS_CONF4_FTLBPAGESIZE_SHIFT);
write_c0_config4(newcf4);
back_to_back_c0_hazard();
config4 = read_c0_config4();
if (config4 != newcf4) {
pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n",
PAGE_SIZE, config4);
/* Switch FTLB off */
set_ftlb_enable(c, 0);
mips_ftlb_disabled = 1;
break;
}
c->tlbsizeftlbsets = 1 <<
((config4 & MIPS_CONF4_FTLBSETS) >>
MIPS_CONF4_FTLBSETS_SHIFT);
c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
mips_has_ftlb_configured = 1;
break;
}
}
c->kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
>> MIPS_CONF4_KSCREXIST_SHIFT;
asid_mask = MIPS_ENTRYHI_ASID;
if (config4 & MIPS_CONF4_AE)
asid_mask |= MIPS_ENTRYHI_ASIDX;
set_cpu_asid_mask(c, asid_mask);
/*
* Warn if the computed ASID mask doesn't match the mask the kernel
* is built for. This may indicate either a serious problem or an
* easy optimisation opportunity, but either way should be addressed.
*/
WARN_ON(asid_mask != cpu_asid_mask(c));
return config4 & MIPS_CONF_M;
}
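/*
* Worked sizing example for the FTLB fields above (hypothetical Config4
* values): FTLBSets = 7 gives 1 << 7 = 128 sets and FTLBWays = 2 gives
* 2 + 2 = 4 ways, so 128 * 4 = 512 FTLB entries are added on top of
* tlbsizevtlb.
*/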
static inline unsigned int decode_config5(struct cpuinfo_mips *c)
{
unsigned int config5, max_mmid_width;
unsigned long asid_mask;
config5 = read_c0_config5();
config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
if (cpu_has_mips_r6) {
if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
config5 |= MIPS_CONF5_MI;
else
config5 &= ~MIPS_CONF5_MI;
}
write_c0_config5(config5);
if (config5 & MIPS_CONF5_EVA)
c->options |= MIPS_CPU_EVA;
if (config5 & MIPS_CONF5_MRP)
c->options |= MIPS_CPU_MAAR;
if (config5 & MIPS_CONF5_LLB)
c->options |= MIPS_CPU_RW_LLB;
if (config5 & MIPS_CONF5_MVH)
c->options |= MIPS_CPU_MVH;
if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
c->options |= MIPS_CPU_VP;
if (config5 & MIPS_CONF5_CA2)
c->ases |= MIPS_ASE_MIPS16E2;
if (config5 & MIPS_CONF5_CRCP)
elf_hwcap |= HWCAP_MIPS_CRC32;
if (cpu_has_mips_r6) {
/* Ensure the write to config5 above takes effect */
back_to_back_c0_hazard();
/* Check whether we successfully enabled MMID support */
config5 = read_c0_config5();
if (config5 & MIPS_CONF5_MI)
c->options |= MIPS_CPU_MMID;
/*
* Warn if we've hardcoded cpu_has_mmid to a value unsuitable
* for the CPU we're running on, or if CPUs in an SMP system
* have inconsistent MMID support.
*/
WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI));
if (cpu_has_mmid) {
write_c0_memorymapid(~0ul);
back_to_back_c0_hazard();
asid_mask = read_c0_memorymapid();
/*
* We maintain a bitmap to track MMID allocation, and
* need a sensible upper bound on the size of that
* bitmap. The initial CPU with MMID support (I6500)
* supports 16 bit MMIDs, which gives us an 8KiB
* bitmap. The architecture recommends that hardware
* support 32 bit MMIDs, which would give us a 512MiB
* bitmap - that's too big in most cases.
*
* Cap MMID width at 16 bits for now & we can revisit
* this if & when hardware supports anything wider.
*/
max_mmid_width = 16;
if (asid_mask > GENMASK(max_mmid_width - 1, 0)) {
pr_info("Capping MMID width at %d bits",
max_mmid_width);
asid_mask = GENMASK(max_mmid_width - 1, 0);
}
set_cpu_asid_mask(c, asid_mask);
}
}
return config5 & MIPS_CONF_M;
}
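/*
* Bitmap arithmetic behind the MMID cap above: a 16 bit MMID space has
* 2^16 = 65536 IDs and so needs a 65536-bit = 8KiB bitmap, while a
* 32 bit space would need 2^32 bits = 512MiB, hence the 16 bit cap.
*/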
static void decode_configs(struct cpuinfo_mips *c)
{
int ok;
/* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
/* Enable FTLB if present and not disabled */
set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
ok = decode_config0(c); /* Read Config registers. */
BUG_ON(!ok); /* Arch spec violation! */
if (ok)
ok = decode_config1(c);
if (ok)
ok = decode_config2(c);
if (ok)
ok = decode_config3(c);
if (ok)
ok = decode_config4(c);
if (ok)
ok = decode_config5(c);
/* Probe the EBase.WG bit */
if (cpu_has_mips_r2_r6) {
u64 ebase;
unsigned int status;
/* {read,write}_c0_ebase_64() may be UNDEFINED prior to r6 */
ebase = cpu_has_mips64r6 ? read_c0_ebase_64()
: (s32)read_c0_ebase();
if (ebase & MIPS_EBASE_WG) {
/* WG bit already set, we can avoid the clumsy probe */
c->options |= MIPS_CPU_EBASE_WG;
} else {
/* It's UNDEFINED to change EBase while BEV=0 */
status = read_c0_status();
write_c0_status(status | ST0_BEV);
irq_enable_hazard();
/*
* On pre-r6 cores, this may well clobber the upper bits
* of EBase. This is hard to avoid without potentially
* hitting UNDEFINED dm*c0 behaviour if EBase is 32-bit.
*/
if (cpu_has_mips64r6)
write_c0_ebase_64(ebase | MIPS_EBASE_WG);
else
write_c0_ebase(ebase | MIPS_EBASE_WG);
back_to_back_c0_hazard();
/* Restore BEV */
write_c0_status(status);
if (read_c0_ebase() & MIPS_EBASE_WG) {
c->options |= MIPS_CPU_EBASE_WG;
write_c0_ebase(ebase);
}
}
}
/* configure the FTLB write probability */
set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
mips_probe_watch_registers(c);
#ifndef CONFIG_MIPS_CPS
if (cpu_has_mips_r2_r6) {
unsigned int core;
core = get_ebase_cpunum();
if (cpu_has_mipsmt)
core >>= fls(core_nvpes()) - 1;
cpu_set_core(c, core);
}
#endif
}
/*
* Probe for certain guest capabilities by writing config bits and reading
* them back, then restore the original value.
*/
#define probe_gc0_config(name, maxconf, bits) \
do { \
unsigned int tmp; \
tmp = read_gc0_##name(); \
write_gc0_##name(tmp | (bits)); \
back_to_back_c0_hazard(); \
maxconf = read_gc0_##name(); \
write_gc0_##name(tmp); \
} while (0)
/*
* Probe for dynamic guest capabilities by changing certain config bits and
* reading back to see if they change. Finally write back the original value.
*/
#define probe_gc0_config_dyn(name, maxconf, dynconf, bits) \
do { \
maxconf = read_gc0_##name(); \
write_gc0_##name(maxconf ^ (bits)); \
back_to_back_c0_hazard(); \
dynconf = maxconf ^ read_gc0_##name(); \
write_gc0_##name(maxconf); \
maxconf |= dynconf; \
} while (0)
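/*
* Illustrative expansion (hypothetical use): probe_gc0_config(config1,
* max1, MIPS_CONF1_FP) reads guest Config1, writes it back with the FP
* bit forced on, re-reads into max1 to see which bits stuck, then
* restores the original value; the read-back reveals whether the guest
* context can expose an FPU at all. "max1" is just a caller-chosen
* variable name.
*/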
static inline unsigned int decode_guest_config0(struct cpuinfo_mips *c)
{
unsigned int config0;
probe_gc0_config(config, config0, MIPS_CONF_M);
if (config0 & MIPS_CONF_M)
c->guest.conf |= BIT(1);
return config0 & MIPS_CONF_M;
}
static inline unsigned int decode_guest_config1(struct cpuinfo_mips *c)
{
unsigned int config1, config1_dyn;
probe_gc0_config_dyn(config1, config1, config1_dyn,
MIPS_CONF_M | MIPS_CONF1_PC | MIPS_CONF1_WR |
MIPS_CONF1_FP);
if (config1 & MIPS_CONF1_FP)
c->guest.options |= MIPS_CPU_FPU;
if (config1_dyn & MIPS_CONF1_FP)
c->guest.options_dyn |= MIPS_CPU_FPU;
if (config1 & MIPS_CONF1_WR)
c->guest.options |= MIPS_CPU_WATCH;
if (config1_dyn & MIPS_CONF1_WR)
c->guest.options_dyn |= MIPS_CPU_WATCH;
if (config1 & MIPS_CONF1_PC)
c->guest.options |= MIPS_CPU_PERF;
if (config1_dyn & MIPS_CONF1_PC)
c->guest.options_dyn |= MIPS_CPU_PERF;
if (config1 & MIPS_CONF_M)
c->guest.conf |= BIT(2);
return config1 & MIPS_CONF_M;
}
static inline unsigned int decode_guest_config2(struct cpuinfo_mips *c)
{
unsigned int config2;
probe_gc0_config(config2, config2, MIPS_CONF_M);
if (config2 & MIPS_CONF_M)
c->guest.conf |= BIT(3);
return config2 & MIPS_CONF_M;
}
static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
{
unsigned int config3, config3_dyn;
probe_gc0_config_dyn(config3, config3, config3_dyn,
MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
MIPS_CONF3_CTXTC);
if (config3 & MIPS_CONF3_CTXTC)
c->guest.options |= MIPS_CPU_CTXTC;
if (config3_dyn & MIPS_CONF3_CTXTC)
c->guest.options_dyn |= MIPS_CPU_CTXTC;
if (config3 & MIPS_CONF3_PW)
c->guest.options |= MIPS_CPU_HTW;
if (config3 & MIPS_CONF3_ULRI)
c->guest.options |= MIPS_CPU_ULRI;
if (config3 & MIPS_CONF3_SC)
c->guest.options |= MIPS_CPU_SEGMENTS;
if (config3 & MIPS_CONF3_BI)
c->guest.options |= MIPS_CPU_BADINSTR;
if (config3 & MIPS_CONF3_BP)
c->guest.options |= MIPS_CPU_BADINSTRP;
if (config3 & MIPS_CONF3_MSA)
c->guest.ases |= MIPS_ASE_MSA;
if (config3_dyn & MIPS_CONF3_MSA)
c->guest.ases_dyn |= MIPS_ASE_MSA;
if (config3 & MIPS_CONF_M)
c->guest.conf |= BIT(4);
return config3 & MIPS_CONF_M;
}
static inline unsigned int decode_guest_config4(struct cpuinfo_mips *c)
{
unsigned int config4;
probe_gc0_config(config4, config4,
MIPS_CONF_M | MIPS_CONF4_KSCREXIST);
c->guest.kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
>> MIPS_CONF4_KSCREXIST_SHIFT;
if (config4 & MIPS_CONF_M)
c->guest.conf |= BIT(5);
return config4 & MIPS_CONF_M;
}
static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
{
unsigned int config5, config5_dyn;
probe_gc0_config_dyn(config5, config5, config5_dyn,
MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);
if (config5 & MIPS_CONF5_MRP)
c->guest.options |= MIPS_CPU_MAAR;
if (config5_dyn & MIPS_CONF5_MRP)
c->guest.options_dyn |= MIPS_CPU_MAAR;
if (config5 & MIPS_CONF5_LLB)
c->guest.options |= MIPS_CPU_RW_LLB;
if (config5 & MIPS_CONF5_MVH)
c->guest.options |= MIPS_CPU_MVH;
if (config5 & MIPS_CONF_M)
c->guest.conf |= BIT(6);
return config5 & MIPS_CONF_M;
}
static inline void decode_guest_configs(struct cpuinfo_mips *c)
{
unsigned int ok;
ok = decode_guest_config0(c);
if (ok)
ok = decode_guest_config1(c);
if (ok)
ok = decode_guest_config2(c);
if (ok)
ok = decode_guest_config3(c);
if (ok)
ok = decode_guest_config4(c);
if (ok)
decode_guest_config5(c);
}
static inline void cpu_probe_guestctl0(struct cpuinfo_mips *c)
{
unsigned int guestctl0, temp;
guestctl0 = read_c0_guestctl0();
if (guestctl0 & MIPS_GCTL0_G0E)
c->options |= MIPS_CPU_GUESTCTL0EXT;
if (guestctl0 & MIPS_GCTL0_G1)
c->options |= MIPS_CPU_GUESTCTL1;
if (guestctl0 & MIPS_GCTL0_G2)
c->options |= MIPS_CPU_GUESTCTL2;
if (!(guestctl0 & MIPS_GCTL0_RAD)) {
c->options |= MIPS_CPU_GUESTID;
/*
* Probe for Direct Root to Guest (DRG). Set GuestCtl1.RID = 0
* first, otherwise all data accesses will be fully virtualised
* as if they were performed by guest mode.
*/
write_c0_guestctl1(0);
tlbw_use_hazard();
write_c0_guestctl0(guestctl0 | MIPS_GCTL0_DRG);
back_to_back_c0_hazard();
temp = read_c0_guestctl0();
if (temp & MIPS_GCTL0_DRG) {
write_c0_guestctl0(guestctl0);
c->options |= MIPS_CPU_DRG;
}
}
}
static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c)
{
if (cpu_has_guestid) {
/* determine the number of bits of GuestID available */
write_c0_guestctl1(MIPS_GCTL1_ID);
back_to_back_c0_hazard();
c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID)
>> MIPS_GCTL1_ID_SHIFT;
write_c0_guestctl1(0);
}
}
static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c)
{
/* determine the number of bits of GTOffset available */
write_c0_gtoffset(0xffffffff);
back_to_back_c0_hazard();
c->gtoffset_mask = read_c0_gtoffset();
write_c0_gtoffset(0);
}
static inline void cpu_probe_vz(struct cpuinfo_mips *c)
{
cpu_probe_guestctl0(c);
if (cpu_has_guestctl1)
cpu_probe_guestctl1(c);
cpu_probe_gtoffset(c);
decode_guest_configs(c);
}
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
| MIPS_CPU_COUNTER)
static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
{
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R3000:
if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
if (cpu_has_confreg()) {
c->cputype = CPU_R3081E;
__cpu_name[cpu] = "R3081";
} else {
c->cputype = CPU_R3000A;
__cpu_name[cpu] = "R3000A";
}
} else {
c->cputype = CPU_R3000;
__cpu_name[cpu] = "R3000";
}
c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R4000:
if (read_c0_config() & CONF_SC) {
if ((c->processor_id & PRID_REV_MASK) >=
PRID_REV_R4400) {
c->cputype = CPU_R4400PC;
__cpu_name[cpu] = "R4400PC";
} else {
c->cputype = CPU_R4000PC;
__cpu_name[cpu] = "R4000PC";
}
} else {
int cca = read_c0_config() & CONF_CM_CMASK;
int mc;
/*
* SC and MC versions can't be reliably told apart,
* but only the latter supports coherent caching
* modes, so assume the firmware has set the KSEG0
* coherency attribute reasonably (if uncached, we
* assume SC).
*/
switch (cca) {
case CONF_CM_CACHABLE_CE:
case CONF_CM_CACHABLE_COW:
case CONF_CM_CACHABLE_CUW:
mc = 1;
break;
default:
mc = 0;
break;
}
if ((c->processor_id & PRID_REV_MASK) >=
PRID_REV_R4400) {
c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
__cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
} else {
c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
__cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
}
}
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_VCE |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
__cpu_name[cpu] = "R4300";
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
__cpu_name[cpu] = "R4600";
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#if 0
case PRID_IMP_R4650:
/*
* This processor doesn't have an MMU, so it's not
* "real easy" to run Linux on it. It is left purely
* for documentation. Commented out because it shares
* its c0_prid ID number with the TX3900.
*/
c->cputype = CPU_R4650;
__cpu_name[cpu] = "R4650";
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#endif
case PRID_IMP_R4700:
c->cputype = CPU_R4700;
__cpu_name[cpu] = "R4700";
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_TX49:
c->cputype = CPU_TX49XX;
__cpu_name[cpu] = "R49XX";
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_LLSC;
if (!(c->processor_id & 0x08))
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
c->tlbsize = 48;
break;
case PRID_IMP_R5000:
c->cputype = CPU_R5000;
__cpu_name[cpu] = "R5000";
set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5500:
c->cputype = CPU_R5500;
__cpu_name[cpu] = "R5500";
set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_NEVADA:
c->cputype = CPU_NEVADA;
__cpu_name[cpu] = "Nevada";
set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_RM7000:
c->cputype = CPU_RM7000;
__cpu_name[cpu] = "RM7000";
set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
* Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
* entries.
*
* Bit 29 set   => 64 entry JTLB
* Bit 29 clear => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_R10000:
c->cputype = CPU_R10000;
__cpu_name[cpu] = "R10000";
set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R12000:
c->cputype = CPU_R12000;
__cpu_name[cpu] = "R12000";
set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST);
break;
case PRID_IMP_R14000:
if (((c->processor_id >> 4) & 0x0f) > 2) {
c->cputype = CPU_R16000;
__cpu_name[cpu] = "R16000";
} else {
c->cputype = CPU_R14000;
__cpu_name[cpu] = "R14000";
}
set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST);
break;
case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON2E:
c->cputype = CPU_LOONGSON2EF;
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2e");
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
break;
case PRID_REV_LOONGSON2F:
c->cputype = CPU_LOONGSON2EF;
__cpu_name[cpu] = "ICT Loongson-2";
set_elf_platform(cpu, "loongson2f");
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
break;
case PRID_REV_LOONGSON3A_R1:
c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R1);
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT);
break;
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3b");
set_isa(c, MIPS_CPU_ISA_M64R1);
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT);
break;
}
c->options = R4K_OPTS |
MIPS_CPU_FPU | MIPS_CPU_LLSC |
MIPS_CPU_32FPR;
c->tlbsize = 64;
set_cpu_asid_mask(c, MIPS_ENTRYHI_ASID);
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
case PRID_IMP_LOONGSON_32: /* Loongson-1 */
decode_configs(c);
c->cputype = CPU_LOONGSON32;
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON1B:
__cpu_name[cpu] = "Loongson 1B";
break;
}
break;
}
}
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
{
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_QEMU_GENERIC:
c->writecombine = _CACHE_UNCACHED;
c->cputype = CPU_QEMU_GENERIC;
__cpu_name[cpu] = "MIPS GENERIC QEMU";
break;
case PRID_IMP_4KC:
c->cputype = CPU_4KC;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 4Kc";
break;
case PRID_IMP_4KEC:
case PRID_IMP_4KECR2:
c->cputype = CPU_4KEC;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 4KEc";
break;
case PRID_IMP_4KSC:
case PRID_IMP_4KSD:
c->cputype = CPU_4KSC;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 4KSc";
break;
case PRID_IMP_5KC:
c->cputype = CPU_5KC;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 5Kc";
break;
case PRID_IMP_5KE:
c->cputype = CPU_5KE;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 5KE";
break;
case PRID_IMP_20KC:
c->cputype = CPU_20KC;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 20Kc";
break;
case PRID_IMP_24K:
c->cputype = CPU_24K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 24Kc";
break;
case PRID_IMP_24KE:
c->cputype = CPU_24K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 24KEc";
break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 25Kc";
break;
case PRID_IMP_34K:
c->cputype = CPU_34K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 34Kc";
cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_74K:
c->cputype = CPU_74K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 74Kc";
break;
case PRID_IMP_M14KC:
c->cputype = CPU_M14KC;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS M14Kc";
break;
case PRID_IMP_M14KEC:
c->cputype = CPU_M14KEC;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS M14KEc";
break;
case PRID_IMP_1004K:
c->cputype = CPU_1004K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 1004Kc";
cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_1074K:
c->cputype = CPU_1074K;
c->writecombine = _CACHE_UNCACHED;
__cpu_name[cpu] = "MIPS 1074Kc";
break;
case PRID_IMP_INTERAPTIV_UP:
c->cputype = CPU_INTERAPTIV;
__cpu_name[cpu] = "MIPS interAptiv";
cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_INTERAPTIV_MP:
c->cputype = CPU_INTERAPTIV;
__cpu_name[cpu] = "MIPS interAptiv (multi)";
cpu_set_mt_per_tc_perf(c);
break;
case PRID_IMP_PROAPTIV_UP:
c->cputype = CPU_PROAPTIV;
__cpu_name[cpu] = "MIPS proAptiv";
break;
case PRID_IMP_PROAPTIV_MP:
c->cputype = CPU_PROAPTIV;
__cpu_name[cpu] = "MIPS proAptiv (multi)";
break;
case PRID_IMP_P5600:
c->cputype = CPU_P5600;
__cpu_name[cpu] = "MIPS P5600";
break;
case PRID_IMP_P6600:
c->cputype = CPU_P6600;
__cpu_name[cpu] = "MIPS P6600";
break;
case PRID_IMP_I6400:
c->cputype = CPU_I6400;
__cpu_name[cpu] = "MIPS I6400";
break;
case PRID_IMP_I6500:
c->cputype = CPU_I6500;
__cpu_name[cpu] = "MIPS I6500";
break;
case PRID_IMP_M5150:
c->cputype = CPU_M5150;
__cpu_name[cpu] = "MIPS M5150";
break;
case PRID_IMP_M6250:
c->cputype = CPU_M6250;
__cpu_name[cpu] = "MIPS M6250";
break;
}
decode_configs(c);
spram_config();
mm_config(c);
switch (__get_cpu_type(c->cputype)) {
case CPU_M5150:
case CPU_P5600:
set_isa(c, MIPS_CPU_ISA_M32R5);
break;
case CPU_I6500:
c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES;
fallthrough;
case CPU_I6400:
c->options |= MIPS_CPU_SHARED_FTLB_RAM;
fallthrough;
default:
break;
}
/*
* Recent MIPS cores use the implementation-dependent ExcCode 16 for
* cache/FTLB parity exceptions.
*/
switch (__get_cpu_type(c->cputype)) {
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_P6600:
case CPU_I6400:
case CPU_I6500:
c->options |= MIPS_CPU_FTLBPAREX;
break;
}
}
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_AU1_REV1:
case PRID_IMP_AU1_REV2:
c->cputype = CPU_ALCHEMY;
switch ((c->processor_id >> 24) & 0xff) {
case 0:
__cpu_name[cpu] = "Au1000";
break;
case 1:
__cpu_name[cpu] = "Au1500";
break;
case 2:
__cpu_name[cpu] = "Au1100";
break;
case 3:
__cpu_name[cpu] = "Au1550";
break;
case 4:
__cpu_name[cpu] = "Au1200";
if ((c->processor_id & PRID_REV_MASK) == 2)
__cpu_name[cpu] = "Au1250";
break;
case 5:
__cpu_name[cpu] = "Au1210";
break;
default:
__cpu_name[cpu] = "Au1xxx";
break;
}
break;
case PRID_IMP_NETLOGIC_AU13XX:
c->cputype = CPU_ALCHEMY;
__cpu_name[cpu] = "Au1300";
break;
}
}
static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
__cpu_name[cpu] = "SiByte SB1";
/* FPU in pass1 is known to have issues. */
if ((c->processor_id & PRID_REV_MASK) < 0x02)
c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
break;
case PRID_IMP_SB1A:
c->cputype = CPU_SB1A;
__cpu_name[cpu] = "SiByte SB1A";
break;
}
}
static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SR71000:
c->cputype = CPU_SR71000;
__cpu_name[cpu] = "Sandcraft SR71000";
c->scache.ways = 8;
c->tlbsize = 64;
break;
}
}
static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
__cpu_name[cpu] = "Philips PR4450";
set_isa(c, MIPS_CPU_ISA_M32R1);
break;
}
}
static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_BMIPS32_REV4:
case PRID_IMP_BMIPS32_REV8:
c->cputype = CPU_BMIPS32;
__cpu_name[cpu] = "Broadcom BMIPS32";
set_elf_platform(cpu, "bmips32");
break;
case PRID_IMP_BMIPS3300:
case PRID_IMP_BMIPS3300_ALT:
case PRID_IMP_BMIPS3300_BUG:
c->cputype = CPU_BMIPS3300;
__cpu_name[cpu] = "Broadcom BMIPS3300";
set_elf_platform(cpu, "bmips3300");
reserve_exception_space(0x400, VECTORSPACING * 64);
break;
case PRID_IMP_BMIPS43XX: {
int rev = c->processor_id & PRID_REV_MASK;
if (rev >= PRID_REV_BMIPS4380_LO &&
rev <= PRID_REV_BMIPS4380_HI) {
c->cputype = CPU_BMIPS4380;
__cpu_name[cpu] = "Broadcom BMIPS4380";
set_elf_platform(cpu, "bmips4380");
c->options |= MIPS_CPU_RIXI;
reserve_exception_space(0x400, VECTORSPACING * 64);
} else {
c->cputype = CPU_BMIPS4350;
__cpu_name[cpu] = "Broadcom BMIPS4350";
set_elf_platform(cpu, "bmips4350");
}
break;
}
case PRID_IMP_BMIPS5000:
case PRID_IMP_BMIPS5200:
c->cputype = CPU_BMIPS5000;
if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200)
__cpu_name[cpu] = "Broadcom BMIPS5200";
else
__cpu_name[cpu] = "Broadcom BMIPS5000";
set_elf_platform(cpu, "bmips5000");
c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
reserve_exception_space(0x1000, VECTORSPACING * 64);
break;
}
}
static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
/* Octeon has different cache interface */
c->options &= ~MIPS_CPU_4K_CACHE;
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_CAVIUM_CN38XX:
case PRID_IMP_CAVIUM_CN31XX:
case PRID_IMP_CAVIUM_CN30XX:
c->cputype = CPU_CAVIUM_OCTEON;
__cpu_name[cpu] = "Cavium Octeon";
goto platform;
case PRID_IMP_CAVIUM_CN58XX:
case PRID_IMP_CAVIUM_CN56XX:
case PRID_IMP_CAVIUM_CN50XX:
case PRID_IMP_CAVIUM_CN52XX:
c->cputype = CPU_CAVIUM_OCTEON_PLUS;
__cpu_name[cpu] = "Cavium Octeon+";
platform:
set_elf_platform(cpu, "octeon");
break;
case PRID_IMP_CAVIUM_CN61XX:
case PRID_IMP_CAVIUM_CN63XX:
case PRID_IMP_CAVIUM_CN66XX:
case PRID_IMP_CAVIUM_CN68XX:
case PRID_IMP_CAVIUM_CNF71XX:
c->cputype = CPU_CAVIUM_OCTEON2;
__cpu_name[cpu] = "Cavium Octeon II";
set_elf_platform(cpu, "octeon2");
break;
case PRID_IMP_CAVIUM_CN70XX:
case PRID_IMP_CAVIUM_CN73XX:
case PRID_IMP_CAVIUM_CNF75XX:
case PRID_IMP_CAVIUM_CN78XX:
c->cputype = CPU_CAVIUM_OCTEON3;
__cpu_name[cpu] = "Cavium Octeon III";
set_elf_platform(cpu, "octeon3");
break;
default:
printk(KERN_INFO "Unknown Octeon chip!\n");
c->cputype = CPU_UNKNOWN;
break;
}
}
#ifdef CONFIG_CPU_LOONGSON64
#include <loongson_regs.h>
static inline void decode_cpucfg(struct cpuinfo_mips *c)
{
u32 cfg1 = read_cpucfg(LOONGSON_CFG1);
u32 cfg2 = read_cpucfg(LOONGSON_CFG2);
u32 cfg3 = read_cpucfg(LOONGSON_CFG3);
if (cfg1 & LOONGSON_CFG1_MMI)
c->ases |= MIPS_ASE_LOONGSON_MMI;
if (cfg2 & LOONGSON_CFG2_LEXT1)
c->ases |= MIPS_ASE_LOONGSON_EXT;
if (cfg2 & LOONGSON_CFG2_LEXT2)
c->ases |= MIPS_ASE_LOONGSON_EXT2;
if (cfg2 & LOONGSON_CFG2_LSPW) {
c->options |= MIPS_CPU_LDPTE;
c->guest.options |= MIPS_CPU_LDPTE;
}
if (cfg3 & LOONGSON_CFG3_LCAMP)
c->ases |= MIPS_ASE_LOONGSON_CAM;
}
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
{
c->cputype = CPU_LOONGSON64;
/* All Loongson processors covered here define ExcCode 16 as GSExc. */
decode_configs(c);
c->options |= MIPS_CPU_GSEXCEX;
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_LOONGSON_64R: /* Loongson-64 Reduced */
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON2K_R1_0:
case PRID_REV_LOONGSON2K_R1_1:
case PRID_REV_LOONGSON2K_R1_2:
case PRID_REV_LOONGSON2K_R1_3:
__cpu_name[cpu] = "Loongson-2K";
set_elf_platform(cpu, "gs264e");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
}
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
MIPS_ASE_LOONGSON_EXT2);
break;
case PRID_IMP_LOONGSON_64C: /* Loongson-3 Classic */
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON3A_R2_0:
case PRID_REV_LOONGSON3A_R2_1:
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
}
/*
* Loongson-3 Classic did not implement MIPS standard TLBINV
* but implemented TLBINVF and EHINV. As currently we're only
* using these two features, enable MIPS_CPU_TLBINV as well.
*
* Also some early Loongson-3A2000 had wrong TLB type in Config
* register, we correct it here.
*/
c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
break;
case PRID_IMP_LOONGSON_64G:
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
decode_cpucfg(c);
break;
default:
panic("Unknown Loongson Processor ID!");
break;
}
}
#else
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
#endif
static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
/*
* XBurst lacks a config2 register, so the config3 decode was skipped in
* decode_configs().
*/
decode_config3(c);
/* XBurst does not implement the CP0 counter. */
c->options &= ~MIPS_CPU_COUNTER;
BUG_ON(__builtin_constant_p(cpu_has_counter) && cpu_has_counter);
/* XBurst has virtually tagged icache */
c->icache.flags |= MIPS_CACHE_VTAG;
switch (c->processor_id & PRID_IMP_MASK) {
/* XBurst®1 with MXU1.0/MXU1.1 SIMD ISA */
case PRID_IMP_XBURST_REV1:
/*
* The XBurst core by default attempts to avoid branch target
* buffer lookups by detecting & special casing loops. This
* feature causes BogoMIPS and lpj to be calculated incorrectly.
* Set cp0 config7 bit 4 to disable this feature.
*/
set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
switch (c->processor_id & PRID_COMP_MASK) {
/*
* The config0 register in the XBurst CPUs with a processor ID of
* PRID_COMP_INGENIC_D0 report themselves as MIPS32r2 compatible,
* but they don't actually support this ISA.
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
/* FPU is not properly detected on JZ4760(B). */
if (c->processor_id == 0x2ed0024f)
c->options |= MIPS_CPU_FPU;
fallthrough;
/*
* The config0 register in the XBurst CPUs with a processor ID of
* PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned
* huge page TLB mode. This mode is not compatible with the MIPS
* standard; it will cause a TLB miss and enter an infinite loop
* (line 21 in tlb-funcs.S) when starting the init process.
* After chip reset, the default is HPTLB mode, so write 0xa9000000
* to cp0 register 5 sel 4 to switch back to VTLB mode and prevent
* getting stuck.
*/
case PRID_COMP_INGENIC_D1:
write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
break;
default:
break;
}
fallthrough;
/* XBurst®1 with MXU2.0 SIMD ISA */
case PRID_IMP_XBURST_REV2:
/* Ingenic uses the WA bit to achieve write-combine memory writes */
c->writecombine = _CACHE_CACHABLE_WA;
c->cputype = CPU_XBURST;
__cpu_name[cpu] = "Ingenic XBurst";
break;
/* XBurst®2 with MXU2.1 SIMD ISA */
case PRID_IMP_XBURST2:
c->cputype = CPU_XBURST;
__cpu_name[cpu] = "Ingenic XBurst II";
break;
default:
panic("Unknown Ingenic Processor ID!");
break;
}
}
#ifdef CONFIG_64BIT
/* For use by uaccess.h */
u64 __ua_limit;
EXPORT_SYMBOL(__ua_limit);
#endif
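/*
* Human-readable CPU names and ELF platform strings, filled in by the
* probe code above and exposed via /proc/cpuinfo and the AT_PLATFORM /
* AT_BASE_PLATFORM auxiliary vector entries.
*/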
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
const char *__elf_base_platform;
void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
/*
* Set a default ELF platform; the CPU probe may later
* overwrite it with a more precise value.
*/
set_elf_platform(cpu, "mips");
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
c->writecombine = _CACHE_UNCACHED;
c->fpu_csr31 = FPU_CSR_RN;
c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
c->processor_id = read_c0_prid();
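/* Bits 23:16 of PRId hold the company ID; dispatch accordingly. */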
switch (c->processor_id & PRID_COMP_MASK) {
case PRID_COMP_LEGACY:
cpu_probe_legacy(c, cpu);
break;
case PRID_COMP_MIPS:
cpu_probe_mips(c, cpu);
break;
case PRID_COMP_ALCHEMY:
case PRID_COMP_NETLOGIC:
cpu_probe_alchemy(c, cpu);
break;
case PRID_COMP_SIBYTE:
cpu_probe_sibyte(c, cpu);
break;
case PRID_COMP_BROADCOM:
cpu_probe_broadcom(c, cpu);
break;
case PRID_COMP_SANDCRAFT:
cpu_probe_sandcraft(c, cpu);
break;
case PRID_COMP_NXP:
cpu_probe_nxp(c, cpu);
break;
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
case PRID_COMP_LOONGSON:
cpu_probe_loongson(c, cpu);
break;
case PRID_COMP_INGENIC_13:
case PRID_COMP_INGENIC_D0:
case PRID_COMP_INGENIC_D1:
case PRID_COMP_INGENIC_E1:
cpu_probe_ingenic(c, cpu);
break;
}
BUG_ON(!__cpu_name[cpu]);
BUG_ON(c->cputype == CPU_UNKNOWN);
/*
* Platform code can force the cpu type to optimize code
* generation. In that case be sure the cpu type is correctly
* manually setup otherwise it could trigger some nasty bugs.
*/
BUG_ON(current_cpu_type() != c->cputype);
if (cpu_has_rixi) {
/* Enable the RIXI exceptions */
set_c0_pagegrain(PG_IEC);
back_to_back_c0_hazard();
/* Verify the IEC bit is set */
if (read_c0_pagegrain() & PG_IEC)
c->options |= MIPS_CPU_RIXIEX;
}
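/* Honour the "nofpu", "nodsp" and "nohtw" command-line overrides. */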
if (mips_fpu_disabled)
c->options &= ~MIPS_CPU_FPU;
if (mips_dsp_disabled)
c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
if (mips_htw_disabled) {
c->options &= ~MIPS_CPU_HTW;
write_c0_pwctl(read_c0_pwctl() &
~(1 << MIPS_PWCTL_PWEN_SHIFT));
}
if (c->options & MIPS_CPU_FPU)
cpu_set_fpu_opts(c);
else
cpu_set_nofpu_opts(c);
if (cpu_has_mips_r2_r6) {
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
/* R2 has Performance Counter Interrupt indicator */
c->options |= MIPS_CPU_PCI;
} else
c->srsets = 1;
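/* Export user-visible ISA/ASE features through ELF HWCAP. */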
if (cpu_has_mips_r6)
elf_hwcap |= HWCAP_MIPS_R6;
if (cpu_has_msa) {
c->msa_id = cpu_get_msa_id();
WARN(c->msa_id & MSA_IR_WRPF,
"Vector register partitioning unimplemented!");
elf_hwcap |= HWCAP_MIPS_MSA;
}
if (cpu_has_mips16)
elf_hwcap |= HWCAP_MIPS_MIPS16;
if (cpu_has_mdmx)
elf_hwcap |= HWCAP_MIPS_MDMX;
if (cpu_has_mips3d)
elf_hwcap |= HWCAP_MIPS_MIPS3D;
if (cpu_has_smartmips)
elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
if (cpu_has_dsp)
elf_hwcap |= HWCAP_MIPS_DSP;
if (cpu_has_dsp2)
elf_hwcap |= HWCAP_MIPS_DSP2;
if (cpu_has_dsp3)
elf_hwcap |= HWCAP_MIPS_DSP3;
if (cpu_has_mips16e2)
elf_hwcap |= HWCAP_MIPS_MIPS16E2;
if (cpu_has_loongson_mmi)
elf_hwcap |= HWCAP_LOONGSON_MMI;
if (cpu_has_loongson_ext)
elf_hwcap |= HWCAP_LOONGSON_EXT;
if (cpu_has_loongson_ext2)
elf_hwcap |= HWCAP_LOONGSON_EXT2;
if (cpu_has_vz)
cpu_probe_vz(c);
cpu_probe_vmbits(c);
/*
* Synthesize CPUCFG data if running on Loongson processors;
* no-op otherwise.
*
* This looks at previously probed features, so keep this at the bottom.
*/
loongson3_cpucfg_synthesize_data(c);
#ifdef CONFIG_64BIT
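/*
* Computed once on the boot CPU: with cpu_vmbits == 48 this yields
* 0xffff000000000000, the bound uaccess uses for user pointers.
*/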
if (cpu == 0)
__ua_limit = ~((1ull << cpu_vmbits) - 1);
#endif
reserve_exception_space(0, 0x1000);
}
void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
pr_info("CPU%d revision is: %08x (%s)\n",
smp_processor_id(), c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
if (cpu_has_msa)
pr_info("MSA revision is: %08x\n", c->msa_id);
}
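/*
* The helpers below pack the cluster, core and VP(E) bit-fields of a
* CPU's MIPS CM GlobalNumber into cpuinfo->globalnumber.
*/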
void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster)
{
/* Ensure the cluster number fits in the field */
WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >>
MIPS_GLOBALNUMBER_CLUSTER_SHF));
cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CLUSTER;
cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF;
}
void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core)
{
/* Ensure the core number fits in the field */
WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF));
cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE;
cpuinfo->globalnumber |= core << MIPS_GLOBALNUMBER_CORE_SHF;
}
void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe)
{
/* Ensure the VP(E) ID fits in the field */
WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF));
/* Ensure we're not using VP(E)s without support */
WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) &&
!IS_ENABLED(CONFIG_CPU_MIPSR6));
cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP;
cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF;
}
| linux-master | arch/mips/kernel/cpu-probe.c |