python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support code for the SCOOP interface found on various Sharp PDAs
*
* Copyright (c) 2004 Richard Purdie
*
* Based on code written by Sharp/Lineo for 2.4 kernels
*/
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/hardware/scoop.h>
/* PCMCIA to Scoop linkage
There is no easy way to link multiple scoop devices into one
single entity for the pxa2xx_pcmcia device, so this structure,
which is set up by the platform code, is used instead.
This file is never modular, so this symbol is always
accessible to the board support files.
*/
struct scoop_pcmcia_config *platform_scoop_config;
EXPORT_SYMBOL(platform_scoop_config);
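/*
 * Illustrative sketch only (not part of the original driver, compiled
 * out): a board support file typically wires up this linkage along
 * these lines.  Every name prefixed "example_" or "EXAMPLE_" below is
 * hypothetical.
 */
#if 0
static struct scoop_pcmcia_dev example_scoop_devs[] = {
	{
		.dev        = &example_scoop_device.dev, /* a SCOOP platform device */
		.irq        = EXAMPLE_IRQ_CF,            /* card interrupt */
		.cd_irq     = EXAMPLE_IRQ_CF_CD,         /* card-detect interrupt */
		.cd_irq_str = "PCMCIA0 CD",
	},
};

static struct scoop_pcmcia_config example_pcmcia_config = {
	.devs     = example_scoop_devs,
	.num_devs = ARRAY_SIZE(example_scoop_devs),
};

static void __init example_board_init(void)
{
	platform_scoop_config = &example_pcmcia_config;
}
#endif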
struct scoop_dev {
void __iomem *base;
struct gpio_chip gpio;
spinlock_t scoop_lock;
unsigned short suspend_clr;
unsigned short suspend_set;
u32 scoop_gpwr;
};
void reset_scoop(struct device *dev)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
iowrite16(0x0100, sdev->base + SCOOP_MCR); /* 00 */
iowrite16(0x0000, sdev->base + SCOOP_CDR); /* 04 */
iowrite16(0x0000, sdev->base + SCOOP_CCR); /* 10 */
iowrite16(0x0000, sdev->base + SCOOP_IMR); /* 18 */
iowrite16(0x00FF, sdev->base + SCOOP_IRM); /* 14 */
iowrite16(0x0000, sdev->base + SCOOP_ISR); /* 1C */
iowrite16(0x0000, sdev->base + SCOOP_IRM);
}
static void __scoop_gpio_set(struct scoop_dev *sdev,
unsigned offset, int value)
{
unsigned short gpwr;
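	/* GPWR bit 0 carries no GPIO; chip offsets 0..11 use bits 1..12 */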
gpwr = ioread16(sdev->base + SCOOP_GPWR);
if (value)
gpwr |= 1 << (offset + 1);
else
gpwr &= ~(1 << (offset + 1));
iowrite16(gpwr, sdev->base + SCOOP_GPWR);
}
static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct scoop_dev *sdev = gpiochip_get_data(chip);
unsigned long flags;
spin_lock_irqsave(&sdev->scoop_lock, flags);
__scoop_gpio_set(sdev, offset, value);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
}
static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct scoop_dev *sdev = gpiochip_get_data(chip);
/* XXX: I'm unsure, but it seems so */
return !!(ioread16(sdev->base + SCOOP_GPRR) & (1 << (offset + 1)));
}
static int scoop_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
struct scoop_dev *sdev = gpiochip_get_data(chip);
unsigned long flags;
unsigned short gpcr;
spin_lock_irqsave(&sdev->scoop_lock, flags);
gpcr = ioread16(sdev->base + SCOOP_GPCR);
gpcr &= ~(1 << (offset + 1));
iowrite16(gpcr, sdev->base + SCOOP_GPCR);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
return 0;
}
static int scoop_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct scoop_dev *sdev = gpiochip_get_data(chip);
unsigned long flags;
unsigned short gpcr;
spin_lock_irqsave(&sdev->scoop_lock, flags);
__scoop_gpio_set(sdev, offset, value);
gpcr = ioread16(sdev->base + SCOOP_GPCR);
gpcr |= 1 << (offset + 1);
iowrite16(gpcr, sdev->base + SCOOP_GPCR);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
return 0;
}
unsigned short read_scoop_reg(struct device *dev, unsigned short reg)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
return ioread16(sdev->base + reg);
}
void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
iowrite16(data, sdev->base + reg);
}
EXPORT_SYMBOL(reset_scoop);
EXPORT_SYMBOL(read_scoop_reg);
EXPORT_SYMBOL(write_scoop_reg);
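/*
 * Usage sketch (illustrative only, compiled out): PCMCIA glue code
 * drives SCOOP registers through the accessors above roughly like
 * this.  The CPR bit value shown here is hypothetical.
 */
#if 0
static void example_card_power_on(struct device *scoop)
{
	unsigned short cpr = read_scoop_reg(scoop, SCOOP_CPR);

	write_scoop_reg(scoop, SCOOP_CPR, cpr | 0x0001); /* hypothetical power bit */
}
#endif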
#ifdef CONFIG_PM
static void check_scoop_reg(struct scoop_dev *sdev)
{
unsigned short mcr;
mcr = ioread16(sdev->base + SCOOP_MCR);
if ((mcr & 0x100) == 0)
iowrite16(0x0101, sdev->base + SCOOP_MCR);
}
static int scoop_suspend(struct platform_device *dev, pm_message_t state)
{
struct scoop_dev *sdev = platform_get_drvdata(dev);
check_scoop_reg(sdev);
sdev->scoop_gpwr = ioread16(sdev->base + SCOOP_GPWR);
iowrite16((sdev->scoop_gpwr & ~sdev->suspend_clr) | sdev->suspend_set, sdev->base + SCOOP_GPWR);
return 0;
}
static int scoop_resume(struct platform_device *dev)
{
struct scoop_dev *sdev = platform_get_drvdata(dev);
check_scoop_reg(sdev);
iowrite16(sdev->scoop_gpwr, sdev->base + SCOOP_GPWR);
return 0;
}
#else
#define scoop_suspend NULL
#define scoop_resume NULL
#endif
static int scoop_probe(struct platform_device *pdev)
{
struct scoop_dev *devptr;
struct scoop_config *inf;
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int ret;
if (!mem)
return -EINVAL;
devptr = kzalloc(sizeof(struct scoop_dev), GFP_KERNEL);
if (!devptr)
return -ENOMEM;
spin_lock_init(&devptr->scoop_lock);
inf = pdev->dev.platform_data;
devptr->base = ioremap(mem->start, resource_size(mem));
if (!devptr->base) {
ret = -ENOMEM;
goto err_ioremap;
}
platform_set_drvdata(pdev, devptr);
printk("Sharp Scoop Device found at 0x%08x -> 0x%8p\n",(unsigned int)mem->start, devptr->base);
iowrite16(0x0140, devptr->base + SCOOP_MCR);
reset_scoop(&pdev->dev);
iowrite16(0x0000, devptr->base + SCOOP_CPR);
iowrite16(inf->io_dir & 0xffff, devptr->base + SCOOP_GPCR);
iowrite16(inf->io_out & 0xffff, devptr->base + SCOOP_GPWR);
devptr->suspend_clr = inf->suspend_clr;
devptr->suspend_set = inf->suspend_set;
devptr->gpio.base = -1;
if (inf->gpio_base != 0) {
devptr->gpio.label = dev_name(&pdev->dev);
devptr->gpio.base = inf->gpio_base;
devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */
devptr->gpio.set = scoop_gpio_set;
devptr->gpio.get = scoop_gpio_get;
devptr->gpio.direction_input = scoop_gpio_direction_input;
devptr->gpio.direction_output = scoop_gpio_direction_output;
ret = gpiochip_add_data(&devptr->gpio, devptr);
if (ret)
goto err_gpio;
}
return 0;
err_gpio:
platform_set_drvdata(pdev, NULL);
err_ioremap:
iounmap(devptr->base);
kfree(devptr);
return ret;
}
static void scoop_remove(struct platform_device *pdev)
{
struct scoop_dev *sdev = platform_get_drvdata(pdev);
if (sdev->gpio.base != -1)
gpiochip_remove(&sdev->gpio);
platform_set_drvdata(pdev, NULL);
iounmap(sdev->base);
kfree(sdev);
}
static struct platform_driver scoop_driver = {
.probe = scoop_probe,
.remove_new = scoop_remove,
.suspend = scoop_suspend,
.resume = scoop_resume,
.driver = {
.name = "sharp-scoop",
},
};
static int __init scoop_init(void)
{
return platform_driver_register(&scoop_driver);
}
subsys_initcall(scoop_init);
| linux-master | arch/arm/common/scoop.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* File: arch/arm/plat-omap/fb.c
*
* Framebuffer device registration for TI OMAP platforms
*
* Copyright (C) 2006 Nokia Corporation
* Author: Imre Deak <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <linux/omapfb.h>
#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <asm/mach/map.h>
#include "irqs.h"
#if IS_ENABLED(CONFIG_FB_OMAP)
static bool omapfb_lcd_configured;
static struct omapfb_platform_data omapfb_config;
static u64 omap_fb_dma_mask = ~(u32)0;
static struct resource omap_fb_resources[] = {
{
.name = "irq",
.start = INT_LCD_CTRL,
.flags = IORESOURCE_IRQ,
},
{
.name = "irq",
.start = INT_SOSSI_MATCH,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device omap_fb_device = {
.name = "omapfb",
.id = -1,
.dev = {
.dma_mask = &omap_fb_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &omapfb_config,
},
.num_resources = ARRAY_SIZE(omap_fb_resources),
.resource = omap_fb_resources,
};
void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
{
omapfb_config.lcd = *config;
omapfb_lcd_configured = true;
}
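/*
 * Illustrative sketch: a board file provides the LCD configuration once
 * at init time, along these lines (the Nokia 770 board file later in
 * this collection does exactly this with .ctrl_name = "hwa742"; the
 * controller name below is hypothetical):
 *
 *	static const struct omap_lcd_config board_lcd_config __initconst = {
 *		.ctrl_name = "internal",
 *	};
 *
 *	omapfb_set_lcd_config(&board_lcd_config);
 */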
static int __init omap_init_fb(void)
{
/*
* If the board file has not set the lcd config with
* omapfb_set_lcd_config(), don't bother registering the omapfb device
*/
if (!omapfb_lcd_configured)
return 0;
return platform_device_register(&omap_fb_device);
}
arch_initcall(omap_init_fb);
#else
void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
{
}
#endif
| linux-master | arch/arm/mach-omap1/fb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/serial.c
*
* OMAP1 serial support.
*/
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mach-types.h>
#include "common.h"
#include "serial.h"
#include "mux.h"
#include "pm.h"
#include "soc.h"
static struct clk * uart1_ck;
static struct clk * uart2_ck;
static struct clk * uart3_ck;
static inline unsigned int omap_serial_in(struct plat_serial8250_port *up,
int offset)
{
offset <<= up->regshift;
return (unsigned int)__raw_readb(up->membase + offset);
}
static inline void omap_serial_outp(struct plat_serial8250_port *p, int offset,
int value)
{
offset <<= p->regshift;
__raw_writeb(value, p->membase + offset);
}
/*
* Internal UARTs need to be initialized for the 8250 autoconfig to work
* properly. Note that the TX watermark initialization may not be needed
* once the 8250.c watermark handling code is merged.
*/
static void __init omap_serial_reset(struct plat_serial8250_port *p)
{
omap_serial_outp(p, UART_OMAP_MDR1,
UART_OMAP_MDR1_DISABLE); /* disable UART */
omap_serial_outp(p, UART_OMAP_SCR, 0x08); /* TX watermark */
omap_serial_outp(p, UART_OMAP_MDR1,
UART_OMAP_MDR1_16X_MODE); /* enable UART */
if (!cpu_is_omap15xx()) {
omap_serial_outp(p, UART_OMAP_SYSC, 0x01);
while (!(omap_serial_in(p, UART_OMAP_SYSC) & 0x01));
}
}
static struct plat_serial8250_port serial_platform_data[] = {
{
.mapbase = OMAP1_UART1_BASE,
.irq = INT_UART1,
.flags = UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = OMAP16XX_BASE_BAUD * 16,
},
{
.mapbase = OMAP1_UART2_BASE,
.irq = INT_UART2,
.flags = UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = OMAP16XX_BASE_BAUD * 16,
},
{
.mapbase = OMAP1_UART3_BASE,
.irq = INT_UART3,
.flags = UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = OMAP16XX_BASE_BAUD * 16,
},
{ },
};
static struct platform_device serial_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = serial_platform_data,
},
};
/*
* Note that on Innovator-1510 UART2 pins conflict with USB2.
* By default UART2 does not work on Innovator-1510 if you have
* USB OHCI enabled. To use UART2, you must disable USB2 first.
*/
void __init omap_serial_init(void)
{
int i;
if (cpu_is_omap15xx()) {
serial_platform_data[0].uartclk = OMAP1510_BASE_BAUD * 16;
serial_platform_data[1].uartclk = OMAP1510_BASE_BAUD * 16;
serial_platform_data[2].uartclk = OMAP1510_BASE_BAUD * 16;
}
for (i = 0; i < ARRAY_SIZE(serial_platform_data) - 1; i++) {
/* Static mapping, never released */
serial_platform_data[i].membase =
ioremap(serial_platform_data[i].mapbase, SZ_2K);
if (!serial_platform_data[i].membase) {
printk(KERN_ERR "Could not ioremap uart%i\n", i);
continue;
}
switch (i) {
case 0:
uart1_ck = clk_get(NULL, "uart1_ck");
if (IS_ERR(uart1_ck))
printk("Could not get uart1_ck\n");
else {
clk_prepare_enable(uart1_ck);
if (cpu_is_omap15xx())
clk_set_rate(uart1_ck, 12000000);
}
break;
case 1:
uart2_ck = clk_get(NULL, "uart2_ck");
if (IS_ERR(uart2_ck))
printk("Could not get uart2_ck\n");
else {
clk_prepare_enable(uart2_ck);
if (cpu_is_omap15xx())
clk_set_rate(uart2_ck, 12000000);
else
clk_set_rate(uart2_ck, 48000000);
}
break;
case 2:
uart3_ck = clk_get(NULL, "uart3_ck");
if (IS_ERR(uart3_ck))
printk("Could not get uart3_ck\n");
else {
clk_prepare_enable(uart3_ck);
if (cpu_is_omap15xx())
clk_set_rate(uart3_ck, 12000000);
}
break;
}
omap_serial_reset(&serial_platform_data[i]);
}
}
#ifdef CONFIG_OMAP_SERIAL_WAKE
static irqreturn_t omap_serial_wake_interrupt(int irq, void *dev_id)
{
/* Need to do something with serial port right after wake-up? */
return IRQ_HANDLED;
}
/*
* Reroutes serial RX lines to GPIO lines for the duration of
* sleep to allow waking up the device from serial port even
* in deep sleep.
*/
void omap_serial_wake_trigger(int enable)
{
if (!cpu_is_omap16xx())
return;
if (uart1_ck != NULL) {
if (enable)
omap_cfg_reg(V14_16XX_GPIO37);
else
omap_cfg_reg(V14_16XX_UART1_RX);
}
if (uart2_ck != NULL) {
if (enable)
omap_cfg_reg(R9_16XX_GPIO18);
else
omap_cfg_reg(R9_16XX_UART2_RX);
}
if (uart3_ck != NULL) {
if (enable)
omap_cfg_reg(L14_16XX_GPIO49);
else
omap_cfg_reg(L14_16XX_UART3_RX);
}
}
static void __init omap_serial_set_port_wakeup(int idx)
{
struct gpio_desc *d;
int ret;
d = gpiod_get_index(NULL, "wakeup", idx, GPIOD_IN);
if (IS_ERR(d)) {
pr_err("Unable to get UART wakeup GPIO descriptor\n");
return;
}
ret = request_irq(gpiod_to_irq(d), &omap_serial_wake_interrupt,
IRQF_TRIGGER_RISING, "serial wakeup", NULL);
if (ret) {
gpiod_put(d);
pr_err("No interrupt for UART%d wake GPIO\n", idx + 1);
return;
}
enable_irq_wake(gpiod_to_irq(d));
}
int __init omap_serial_wakeup_init(void)
{
if (!cpu_is_omap16xx())
return 0;
if (uart1_ck != NULL)
omap_serial_set_port_wakeup(0);
if (uart2_ck != NULL)
omap_serial_set_port_wakeup(1);
if (uart3_ck != NULL)
omap_serial_set_port_wakeup(2);
return 0;
}
#endif /* CONFIG_OMAP_SERIAL_WAKE */
static int __init omap_init(void)
{
if (!cpu_class_is_omap1())
return -ENODEV;
return platform_device_register(&serial_device);
}
arch_initcall(omap_init);
| linux-master | arch/arm/mach-omap1/serial.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP1 Dual-Mode Timers - platform device registration
*
* Contains first level initialization routines which internally
* generates timer device information and registers with linux
* device model. It also has a low level function to change the timer
* input clock source.
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
* Tarun Kanti DebBarma <[email protected]>
* Thara Gopinath <[email protected]>
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dmtimer-omap.h>
#include <linux/soc/ti/omap1-io.h>
#include <clocksource/timer-ti-dm.h>
#include "soc.h"
#define OMAP1610_GPTIMER1_BASE 0xfffb1400
#define OMAP1610_GPTIMER2_BASE 0xfffb1c00
#define OMAP1610_GPTIMER3_BASE 0xfffb2400
#define OMAP1610_GPTIMER4_BASE 0xfffb2c00
#define OMAP1610_GPTIMER5_BASE 0xfffb3400
#define OMAP1610_GPTIMER6_BASE 0xfffb3c00
#define OMAP1610_GPTIMER7_BASE 0xfffb7400
#define OMAP1610_GPTIMER8_BASE 0xfffbd400
#define OMAP1_DM_TIMER_COUNT 8
static int omap1_dm_timer_set_src(struct platform_device *pdev,
int source)
{
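	/*
	 * MOD_CONF_CTRL_1 carries a 2-bit clock source field per dual-mode
	 * timer; timer id N (1-based) occupies bits 2*(N-1) and 2*(N-1)+1.
	 */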
int n = (pdev->id - 1) << 1;
u32 l;
l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
l |= source << n;
omap_writel(l, MOD_CONF_CTRL_1);
return 0;
}
static int __init omap1_dm_timer_init(void)
{
int i;
int ret;
struct dmtimer_platform_data *pdata;
struct platform_device *pdev;
if (!cpu_is_omap16xx())
return 0;
for (i = 1; i <= OMAP1_DM_TIMER_COUNT; i++) {
struct resource res[2];
u32 base, irq;
switch (i) {
case 1:
base = OMAP1610_GPTIMER1_BASE;
irq = INT_1610_GPTIMER1;
break;
case 2:
base = OMAP1610_GPTIMER2_BASE;
irq = INT_1610_GPTIMER2;
break;
case 3:
base = OMAP1610_GPTIMER3_BASE;
irq = INT_1610_GPTIMER3;
break;
case 4:
base = OMAP1610_GPTIMER4_BASE;
irq = INT_1610_GPTIMER4;
break;
case 5:
base = OMAP1610_GPTIMER5_BASE;
irq = INT_1610_GPTIMER5;
break;
case 6:
base = OMAP1610_GPTIMER6_BASE;
irq = INT_1610_GPTIMER6;
break;
case 7:
base = OMAP1610_GPTIMER7_BASE;
irq = INT_1610_GPTIMER7;
break;
case 8:
base = OMAP1610_GPTIMER8_BASE;
irq = INT_1610_GPTIMER8;
break;
default:
/*
* Not supposed to be reached; this default case
* only silences a compiler warning.
*/
return -EINVAL;
}
pdev = platform_device_alloc("omap_timer", i);
if (!pdev) {
pr_err("%s: Failed to device alloc for dmtimer%d\n",
__func__, i);
return -ENOMEM;
}
memset(res, 0, 2 * sizeof(struct resource));
res[0].start = base;
res[0].end = base + 0x46;
res[0].flags = IORESOURCE_MEM;
res[1].start = irq;
res[1].end = irq;
res[1].flags = IORESOURCE_IRQ;
ret = platform_device_add_resources(pdev, res,
ARRAY_SIZE(res));
if (ret) {
dev_err(&pdev->dev, "%s: Failed to add resources.\n",
__func__);
goto err_free_pdev;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
ret = -ENOMEM;
goto err_free_pdata;
}
pdata->set_timer_src = omap1_dm_timer_set_src;
pdata->timer_capability = OMAP_TIMER_ALWON |
OMAP_TIMER_NEEDS_RESET | OMAP_TIMER_HAS_DSP_IRQ;
ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (ret) {
dev_err(&pdev->dev, "%s: Failed to add platform data.\n",
__func__);
goto err_free_pdata;
}
ret = platform_device_add(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: Failed to add platform device.\n",
__func__);
goto err_free_pdata;
}
dev_dbg(&pdev->dev, " Registered.\n");
}
return 0;
err_free_pdata:
kfree(pdata);
err_free_pdev:
platform_device_put(pdev);
return ret;
}
arch_initcall(omap1_dm_timer_init);
| linux-master | arch/arm/mach-omap1/timer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/arch/arm/plat-omap/ocpi.c
*
* Minimal OCP bus support for omap16xx
*
* Copyright (C) 2003 - 2005 Nokia Corporation
* Copyright (C) 2012 Texas Instruments, Inc.
* Written by Tony Lindgren <[email protected]>
*
* Modified for clock framework by Paul Mundt <[email protected]>.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/soc/ti/omap1-io.h>
#include "hardware.h"
#include "common.h"
#define OCPI_BASE 0xfffec320
#define OCPI_FAULT (OCPI_BASE + 0x00)
#define OCPI_CMD_FAULT (OCPI_BASE + 0x04)
#define OCPI_SINT0 (OCPI_BASE + 0x08)
#define OCPI_TABORT (OCPI_BASE + 0x0c)
#define OCPI_SINT1 (OCPI_BASE + 0x10)
#define OCPI_PROT (OCPI_BASE + 0x14)
#define OCPI_SEC (OCPI_BASE + 0x18)
/* USB OHCI OCPI access error registers */
#define HOSTUEADDR 0xfffba0e0
#define HOSTUESTATUS 0xfffba0e4
static struct clk *ocpi_ck;
/*
* Enables device access to OMAP buses via the OCPI bridge
*/
int ocpi_enable(void)
{
unsigned int val;
if (!cpu_is_omap16xx())
return -ENODEV;
/* Enable access for OHCI in OCPI */
val = omap_readl(OCPI_PROT);
val &= ~0xff;
/* val &= (1 << 0); Allow access only to EMIFS */
omap_writel(val, OCPI_PROT);
val = omap_readl(OCPI_SEC);
val &= ~0xff;
omap_writel(val, OCPI_SEC);
return 0;
}
EXPORT_SYMBOL(ocpi_enable);
static int __init omap_ocpi_init(void)
{
if (!cpu_is_omap16xx())
return -ENODEV;
ocpi_ck = clk_get(NULL, "l3_ocpi_ck");
if (IS_ERR(ocpi_ck))
return PTR_ERR(ocpi_ck);
clk_prepare_enable(ocpi_ck);
ocpi_enable();
pr_info("OMAP OCPI interconnect driver loaded\n");
return 0;
}
static void __exit omap_ocpi_exit(void)
{
/* REVISIT: Disable OCPI */
if (!cpu_is_omap16xx())
return;
clk_disable_unprepare(ocpi_ck);
clk_put(ocpi_ck);
}
MODULE_AUTHOR("Tony Lindgren <[email protected]>");
MODULE_DESCRIPTION("OMAP OCPI bus controller module");
MODULE_LICENSE("GPL");
module_init(omap_ocpi_init);
module_exit(omap_ocpi_exit);
| linux-master | arch/arm/mach-omap1/ocpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/board-nokia770.c
*
* Modified from board-generic.c
*/
#include <linux/clkdev.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/property.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/input.h>
#include <linux/omapfb.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/platform_data/keypad-omap.h>
#include <linux/platform_data/lcd-mipid.h>
#include <linux/platform_data/gpio-omap.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "mux.h"
#include "hardware.h"
#include "usb.h"
#include "common.h"
#include "clock.h"
#include "mmc.h"
static const struct software_node nokia770_mpuio_gpiochip_node = {
.name = "mpuio",
};
static const struct software_node nokia770_gpiochip1_node = {
.name = "gpio-0-15",
};
static const struct software_node nokia770_gpiochip2_node = {
.name = "gpio-16-31",
};
static const struct software_node *nokia770_gpiochip_nodes[] = {
&nokia770_mpuio_gpiochip_node,
&nokia770_gpiochip1_node,
&nokia770_gpiochip2_node,
NULL
};
#define ADS7846_PENDOWN_GPIO 15
static const unsigned int nokia770_keymap[] = {
KEY(1, 0, GROUP_0 | KEY_UP),
KEY(2, 0, GROUP_1 | KEY_F5),
KEY(0, 1, GROUP_0 | KEY_LEFT),
KEY(1, 1, GROUP_0 | KEY_ENTER),
KEY(2, 1, GROUP_0 | KEY_RIGHT),
KEY(0, 2, GROUP_1 | KEY_ESC),
KEY(1, 2, GROUP_0 | KEY_DOWN),
KEY(2, 2, GROUP_1 | KEY_F4),
KEY(0, 3, GROUP_2 | KEY_F7),
KEY(1, 3, GROUP_2 | KEY_F8),
KEY(2, 3, GROUP_2 | KEY_F6),
};
static struct resource nokia770_kp_resources[] = {
[0] = {
.start = INT_KEYBOARD,
.end = INT_KEYBOARD,
.flags = IORESOURCE_IRQ,
},
};
static const struct matrix_keymap_data nokia770_keymap_data = {
.keymap = nokia770_keymap,
.keymap_size = ARRAY_SIZE(nokia770_keymap),
};
static struct omap_kp_platform_data nokia770_kp_data = {
.rows = 8,
.cols = 8,
.keymap_data = &nokia770_keymap_data,
.delay = 4,
};
static struct platform_device nokia770_kp_device = {
.name = "omap-keypad",
.id = -1,
.dev = {
.platform_data = &nokia770_kp_data,
},
.num_resources = ARRAY_SIZE(nokia770_kp_resources),
.resource = nokia770_kp_resources,
};
static struct platform_device *nokia770_devices[] __initdata = {
&nokia770_kp_device,
};
static struct mipid_platform_data nokia770_mipid_platform_data = { };
static const struct omap_lcd_config nokia770_lcd_config __initconst = {
.ctrl_name = "hwa742",
};
static const struct property_entry nokia770_mipid_props[] = {
PROPERTY_ENTRY_GPIO("reset-gpios", &nokia770_gpiochip1_node,
13, GPIO_ACTIVE_LOW),
{ }
};
static const struct software_node nokia770_mipid_swnode = {
.name = "lcd_mipid",
.properties = nokia770_mipid_props,
};
static void __init mipid_dev_init(void)
{
nokia770_mipid_platform_data.data_lines = 16;
omapfb_set_lcd_config(&nokia770_lcd_config);
}
static const struct property_entry nokia770_ads7846_props[] = {
PROPERTY_ENTRY_STRING("compatible", "ti,ads7846"),
PROPERTY_ENTRY_U32("touchscreen-size-x", 4096),
PROPERTY_ENTRY_U32("touchscreen-size-y", 4096),
PROPERTY_ENTRY_U32("touchscreen-max-pressure", 256),
PROPERTY_ENTRY_U32("touchscreen-average-samples", 10),
PROPERTY_ENTRY_U16("ti,x-plate-ohms", 180),
PROPERTY_ENTRY_U16("ti,debounce-tol", 3),
PROPERTY_ENTRY_U16("ti,debounce-rep", 1),
PROPERTY_ENTRY_GPIO("pendown-gpios", &nokia770_gpiochip1_node,
ADS7846_PENDOWN_GPIO, GPIO_ACTIVE_LOW),
{ }
};
static const struct software_node nokia770_ads7846_swnode = {
.name = "ads7846",
.properties = nokia770_ads7846_props,
};
static struct spi_board_info nokia770_spi_board_info[] __initdata = {
[0] = {
.modalias = "lcd_mipid",
.bus_num = 2,
.chip_select = 3,
.max_speed_hz = 12000000,
.platform_data = &nokia770_mipid_platform_data,
.swnode = &nokia770_mipid_swnode,
},
[1] = {
.modalias = "ads7846",
.bus_num = 2,
.chip_select = 0,
.max_speed_hz = 2500000,
.swnode = &nokia770_ads7846_swnode,
},
};
static void __init hwa742_dev_init(void)
{
clk_add_alias("hwa_sys_ck", NULL, "bclk", NULL);
}
/* assume no Mini-AB port */
static struct omap_usb_config nokia770_usb_config __initdata = {
.otg = 1,
.register_host = 1,
.register_dev = 1,
.hmc_mode = 16,
.pins[0] = 6,
.extcon = "tahvo-usb",
};
#if IS_ENABLED(CONFIG_MMC_OMAP)
static struct gpiod_lookup_table nokia770_mmc_gpio_table = {
.dev_id = "mmci-omap.1",
.table = {
/* Slot index 0, VSD power, GPIO 41 */
GPIO_LOOKUP_IDX("gpio-32-47", 9,
"vsd", 0, GPIO_ACTIVE_HIGH),
/* Slot index 0, switch, GPIO 23 */
GPIO_LOOKUP_IDX("gpio-16-31", 7,
"cover", 0, GPIO_ACTIVE_HIGH),
{ }
},
};
static struct omap_mmc_platform_data nokia770_mmc2_data = {
.nr_slots = 1,
.max_freq = 12000000,
.slots[0] = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.name = "mmcblk",
},
};
static struct omap_mmc_platform_data *nokia770_mmc_data[OMAP16XX_NR_MMC];
static void __init nokia770_mmc_init(void)
{
gpiod_add_lookup_table(&nokia770_mmc_gpio_table);
/* Only the second MMC controller is used */
nokia770_mmc_data[1] = &nokia770_mmc2_data;
omap1_init_mmc(nokia770_mmc_data, OMAP16XX_NR_MMC);
}
#else
static inline void nokia770_mmc_init(void)
{
}
#endif
#if IS_ENABLED(CONFIG_I2C_CBUS_GPIO)
static const struct software_node_ref_args nokia770_cbus_gpio_refs[] = {
SOFTWARE_NODE_REFERENCE(&nokia770_mpuio_gpiochip_node, 9, 0),
SOFTWARE_NODE_REFERENCE(&nokia770_mpuio_gpiochip_node, 10, 0),
SOFTWARE_NODE_REFERENCE(&nokia770_mpuio_gpiochip_node, 11, 0),
};
static const struct property_entry nokia770_cbus_props[] = {
PROPERTY_ENTRY_REF_ARRAY("gpios", nokia770_cbus_gpio_refs),
{ }
};
static struct platform_device nokia770_cbus_device = {
.name = "i2c-cbus-gpio",
.id = 2,
};
static struct i2c_board_info nokia770_i2c_board_info_2[] __initdata = {
{
I2C_BOARD_INFO("retu", 0x01),
},
{
I2C_BOARD_INFO("tahvo", 0x02),
},
};
static void __init nokia770_cbus_init(void)
{
struct gpio_desc *d;
int irq;
d = gpiod_get(NULL, "retu_irq", GPIOD_IN);
if (IS_ERR(d)) {
pr_err("Unable to get CBUS Retu IRQ GPIO descriptor\n");
} else {
irq = gpiod_to_irq(d);
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
nokia770_i2c_board_info_2[0].irq = irq;
}
d = gpiod_get(NULL, "tahvo_irq", GPIOD_IN);
if (IS_ERR(d)) {
pr_err("Unable to get CBUS Tahvo IRQ GPIO descriptor\n");
} else {
irq = gpiod_to_irq(d);
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
nokia770_i2c_board_info_2[1].irq = irq;
}
i2c_register_board_info(2, nokia770_i2c_board_info_2,
ARRAY_SIZE(nokia770_i2c_board_info_2));
device_create_managed_software_node(&nokia770_cbus_device.dev,
nokia770_cbus_props, NULL);
platform_device_register(&nokia770_cbus_device);
}
#else /* CONFIG_I2C_CBUS_GPIO */
static void __init nokia770_cbus_init(void)
{
}
#endif /* CONFIG_I2C_CBUS_GPIO */
static struct gpiod_lookup_table nokia770_irq_gpio_table = {
.dev_id = NULL,
.table = {
/* GPIO used by SPI device 1 */
GPIO_LOOKUP("gpio-0-15", 15, "ads7846_irq",
GPIO_ACTIVE_HIGH),
/* GPIO used for retu IRQ */
GPIO_LOOKUP("gpio-48-63", 15, "retu_irq",
GPIO_ACTIVE_HIGH),
/* GPIO used for tahvo IRQ */
GPIO_LOOKUP("gpio-32-47", 8, "tahvo_irq",
GPIO_ACTIVE_HIGH),
/* GPIOs used by serial wakeup IRQs */
GPIO_LOOKUP_IDX("gpio-32-47", 5, "wakeup", 0,
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-16-31", 2, "wakeup", 1,
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-48-63", 1, "wakeup", 2,
GPIO_ACTIVE_HIGH),
{ }
},
};
static void __init omap_nokia770_init(void)
{
struct gpio_desc *d;
/* On Nokia 770, the SleepX signal is masked with an
* MPUIO line by default. It has to be unmasked for it
* to become functional */
/* SleepX mask direction */
omap_writew((omap_readw(0xfffb5008) & ~2), 0xfffb5008);
/* Unmask SleepX signal */
omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
software_node_register_node_group(nokia770_gpiochip_nodes);
platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices));
gpiod_add_lookup_table(&nokia770_irq_gpio_table);
d = gpiod_get(NULL, "ads7846_irq", GPIOD_IN);
if (IS_ERR(d))
pr_err("Unable to get ADS7846 IRQ GPIO descriptor\n");
else
nokia770_spi_board_info[1].irq = gpiod_to_irq(d);
spi_register_board_info(nokia770_spi_board_info,
ARRAY_SIZE(nokia770_spi_board_info));
omap_serial_init();
omap_register_i2c_bus(1, 100, NULL, 0);
hwa742_dev_init();
mipid_dev_init();
omap1_usb_init(&nokia770_usb_config);
nokia770_mmc_init();
nokia770_cbus_init();
}
MACHINE_START(NOKIA770, "Nokia 770")
.atag_offset = 0x100,
.map_io = omap1_map_io,
.init_early = omap1_init_early,
.init_irq = omap1_init_irq,
.init_machine = omap_nokia770_init,
.init_late = omap1_init_late,
.init_time = omap1_timer_init,
.restart = omap1_restart,
MACHINE_END
| linux-master | arch/arm/mach-omap1/board-nokia770.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Flash support for OMAP1
*/
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/soc/ti/omap1-io.h>
#include "tc.h"
#include "flash.h"
void omap1_set_vpp(struct platform_device *pdev, int enable)
{
u32 l;
l = omap_readl(EMIFS_CONFIG);
if (enable)
l |= OMAP_EMIFS_CONFIG_WP;
else
l &= ~OMAP_EMIFS_CONFIG_WP;
omap_writel(l, EMIFS_CONFIG);
}
| linux-master | arch/arm/mach-omap1/flash.c |
/*
* linux/arch/arm/mach-omap1/irq.c
*
* Interrupt handler for all OMAP boards
*
* Copyright (C) 2004 Nokia Corporation
* Written by Tony Lindgren <[email protected]>
* Major cleanups by Juha Yrjölä <[email protected]>
*
* Completely re-written to support various OMAP chips with bank specific
* interrupt handlers.
*
* Some snippets of the code taken from the older OMAP interrupt handler
* Copyright (C) 2001 RidgeRun, Inc. Greg Lonnon <[email protected]>
*
* GPIO interrupt handler moved to gpio.c by Juha Yrjola
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
#include "soc.h"
#include "hardware.h"
#include "common.h"
#define IRQ_BANK(irq) ((irq) >> 5)
#define IRQ_BIT(irq) ((irq) & 0x1f)
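/* e.g. IRQ 37: IRQ_BANK(37) = 1, IRQ_BIT(37) = 5 */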
struct omap_irq_bank {
unsigned long base_reg;
void __iomem *va;
unsigned long trigger_map;
unsigned long wake_enable;
};
static u32 omap_l2_irq;
static unsigned int irq_bank_count;
static struct omap_irq_bank *irq_banks;
static struct irq_domain *domain;
static inline unsigned int irq_bank_readl(int bank, int offset)
{
return readl_relaxed(irq_banks[bank].va + offset);
}
static inline void irq_bank_writel(unsigned long value, int bank, int offset)
{
writel_relaxed(value, irq_banks[bank].va + offset);
}
static void omap_ack_irq(int irq)
{
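	/*
	 * IRQs above 31 are cascaded through the level 2 controller;
	 * ack that one first, then always ack level 1.
	 */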
if (irq > 31)
writel_relaxed(0x1, irq_banks[1].va + IRQ_CONTROL_REG_OFFSET);
writel_relaxed(0x1, irq_banks[0].va + IRQ_CONTROL_REG_OFFSET);
}
static void omap_mask_ack_irq(struct irq_data *d)
{
struct irq_chip_type *ct = irq_data_get_chip_type(d);
ct->chip.irq_mask(d);
omap_ack_irq(d->irq);
}
/*
* Allows tuning the IRQ type and priority
*
* NOTE: There is currently no OMAP fiq handler for Linux. Read the
* mailing list threads on FIQ handlers if you are planning to
* add a FIQ handler for OMAP.
*/
static void omap_irq_set_cfg(int irq, int fiq, int priority, int trigger)
{
signed int bank;
unsigned long val, offset;
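	/* ILR layout: bit 0 = FIQ, bit 1 = trigger, bits 2..6 = priority */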
bank = IRQ_BANK(irq);
/* FIQ is only available on bank 0 interrupts */
fiq = bank ? 0 : (fiq & 0x1);
val = fiq | ((priority & 0x1f) << 2) | ((trigger & 0x1) << 1);
offset = IRQ_ILR0_REG_OFFSET + IRQ_BIT(irq) * 0x4;
irq_bank_writel(val, bank, offset);
}
#ifdef CONFIG_ARCH_OMAP15XX
static struct omap_irq_bank omap1510_irq_banks[] = {
{ .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3febfff },
{ .base_reg = OMAP_IH2_BASE, .trigger_map = 0xffbfffed },
};
static struct omap_irq_bank omap310_irq_banks[] = {
{ .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3faefc3 },
{ .base_reg = OMAP_IH2_BASE, .trigger_map = 0x65b3c061 },
};
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
static struct omap_irq_bank omap1610_irq_banks[] = {
{ .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3fefe8f },
{ .base_reg = OMAP_IH2_BASE, .trigger_map = 0xfdb7c1fd },
{ .base_reg = OMAP_IH2_BASE + 0x100, .trigger_map = 0xffffb7ff },
{ .base_reg = OMAP_IH2_BASE + 0x200, .trigger_map = 0xffffffff },
};
#endif
asmlinkage void __exception_irq_entry omap1_handle_irq(struct pt_regs *regs)
{
void __iomem *l1 = irq_banks[0].va;
void __iomem *l2 = irq_banks[1].va;
u32 irqnr;
do {
irqnr = readl_relaxed(l1 + IRQ_ITR_REG_OFFSET);
irqnr &= ~(readl_relaxed(l1 + IRQ_MIR_REG_OFFSET) & 0xffffffff);
if (!irqnr)
break;
irqnr = readl_relaxed(l1 + IRQ_SIR_FIQ_REG_OFFSET);
if (irqnr)
goto irq;
irqnr = readl_relaxed(l1 + IRQ_SIR_IRQ_REG_OFFSET);
if (irqnr == omap_l2_irq) {
irqnr = readl_relaxed(l2 + IRQ_SIR_IRQ_REG_OFFSET);
if (irqnr)
irqnr += 32;
}
irq:
if (irqnr)
generic_handle_domain_irq(domain, irqnr);
else
break;
} while (irqnr);
}
static __init void
omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
gc = irq_alloc_generic_chip("MPU", 1, irq_start, base,
handle_level_irq);
ct = gc->chip_types;
ct->chip.irq_ack = omap_mask_ack_irq;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_set_wake = irq_gc_set_wake;
ct->regs.mask = IRQ_MIR_REG_OFFSET;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
void __init omap1_init_irq(void)
{
struct irq_chip_type *ct;
struct irq_data *d = NULL;
int i, j, irq_base;
unsigned long nr_irqs;
#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
irq_banks = omap1510_irq_banks;
irq_bank_count = ARRAY_SIZE(omap1510_irq_banks);
}
if (cpu_is_omap310()) {
irq_banks = omap310_irq_banks;
irq_bank_count = ARRAY_SIZE(omap310_irq_banks);
}
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
if (cpu_is_omap16xx()) {
irq_banks = omap1610_irq_banks;
irq_bank_count = ARRAY_SIZE(omap1610_irq_banks);
}
#endif
for (i = 0; i < irq_bank_count; i++) {
irq_banks[i].va = ioremap(irq_banks[i].base_reg, 0xff);
if (WARN_ON(!irq_banks[i].va))
return;
}
nr_irqs = irq_bank_count * 32;
irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
if (irq_base < 0) {
pr_warn("Couldn't allocate IRQ numbers\n");
irq_base = 0;
}
omap_l2_irq = irq_base;
omap_l2_irq -= NR_IRQS_LEGACY;
domain = irq_domain_add_legacy(NULL, nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
pr_info("Total of %lu interrupts in %i interrupt banks\n",
nr_irqs, irq_bank_count);
/* Mask and clear all interrupts */
for (i = 0; i < irq_bank_count; i++) {
irq_bank_writel(~0x0, i, IRQ_MIR_REG_OFFSET);
irq_bank_writel(0x0, i, IRQ_ITR_REG_OFFSET);
}
/* Clear any pending interrupts */
irq_bank_writel(0x03, 0, IRQ_CONTROL_REG_OFFSET);
irq_bank_writel(0x03, 1, IRQ_CONTROL_REG_OFFSET);
/* Install the interrupt handlers for each bank */
for (i = 0; i < irq_bank_count; i++) {
for (j = i * 32; j < (i + 1) * 32; j++) {
int irq_trigger;
irq_trigger = irq_banks[i].trigger_map >> IRQ_BIT(j);
omap_irq_set_cfg(j, 0, 0, irq_trigger);
irq_clear_status_flags(j, IRQ_NOREQUEST);
}
omap_alloc_gc(irq_banks[i].va, irq_base + i * 32, 32);
}
/* Unmask level 2 handler */
d = irq_get_irq_data(irq_find_mapping(domain, omap_l2_irq));
if (d) {
ct = irq_data_get_chip_type(d);
ct->chip.irq_unmask(d);
}
set_handle_irq(omap1_handle_irq);
}
| linux-master | arch/arm/mach-omap1/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Platform level USB initialization for FS USB OTG controller on omap1
*
* Copyright (C) 2004 Texas Instruments, Inc.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-map-ops.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/soc/ti/omap1-io.h>
#include <asm/irq.h>
#include "hardware.h"
#include "mux.h"
#include "usb.h"
#include "common.h"
/* These routines should handle the standard chip-specific modes
* for usb0/1/2 ports, covering basic mux and transceiver setup.
*
* Some board-*.c files will need to set up additional mux options,
* like for suspend handling, vbus sensing, GPIOs, and the D+ pullup.
*/
/* TESTED ON:
* - 1611B H2 (with usb1 mini-AB) using standard Mini-B or OTG cables
* - 5912 OSK OHCI (with usb0 standard-A), standard A-to-B cables
* - 5912 OSK UDC, with *nonstandard* A-to-A cable
* - 1510 Innovator UDC with bundled usb0 cable
* - 1510 Innovator OHCI with bundled usb1/usb2 cable
* - 1510 Innovator OHCI with custom usb0 cable, feeding 5V VBUS
* - 1710 custom development board using alternate pin group
* - 1710 H3 (with usb1 mini-AB) using standard Mini-B or OTG cables
*/
#define INT_USB_IRQ_GEN IH2_BASE + 20
#define INT_USB_IRQ_NISO IH2_BASE + 30
#define INT_USB_IRQ_ISO IH2_BASE + 29
#define INT_USB_IRQ_HGEN INT_USB_HHC_1
#define INT_USB_IRQ_OTG IH2_BASE + 8
#ifdef CONFIG_ARCH_OMAP_OTG
static void __init
omap_otg_init(struct omap_usb_config *config)
{
u32 syscon;
int alt_pingroup = 0;
u16 w;
/* NOTE: no bus or clock setup (yet?) */
syscon = omap_readl(OTG_SYSCON_1) & 0xffff;
if (!(syscon & OTG_RESET_DONE))
pr_debug("USB resets not complete?\n");
//omap_writew(0, OTG_IRQ_EN);
/* pin muxing and transceiver pinouts */
if (config->pins[0] > 2) /* alt pingroup 2 */
alt_pingroup = 1;
syscon |= config->usb0_init(config->pins[0], is_usb0_device(config));
syscon |= config->usb1_init(config->pins[1]);
syscon |= config->usb2_init(config->pins[2], alt_pingroup);
pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
omap_writel(syscon, OTG_SYSCON_1);
syscon = config->hmc_mode;
syscon |= USBX_SYNCHRO | (4 << 16) /* B_ASE0_BRST */;
#ifdef CONFIG_USB_OTG
if (config->otg)
syscon |= OTG_EN;
#endif
pr_debug("USB_TRANSCEIVER_CTRL = %03x\n",
omap_readl(USB_TRANSCEIVER_CTRL));
pr_debug("OTG_SYSCON_2 = %08x\n", omap_readl(OTG_SYSCON_2));
omap_writel(syscon, OTG_SYSCON_2);
printk("USB: hmc %d", config->hmc_mode);
if (alt_pingroup)
pr_cont(", usb2 alt %d wires", config->pins[2]);
else if (config->pins[0])
pr_cont(", usb0 %d wires%s", config->pins[0],
is_usb0_device(config) ? " (dev)" : "");
if (config->pins[1])
pr_cont(", usb1 %d wires", config->pins[1]);
if (!alt_pingroup && config->pins[2])
pr_cont(", usb2 %d wires", config->pins[2]);
if (config->otg)
pr_cont(", Mini-AB on usb%d", config->otg - 1);
pr_cont("\n");
/* leave USB clocks/controllers off until needed */
w = omap_readw(ULPD_SOFT_REQ);
w &= ~SOFT_USB_CLK_REQ;
omap_writew(w, ULPD_SOFT_REQ);
w = omap_readw(ULPD_CLOCK_CTRL);
w &= ~USB_MCLK_EN;
w |= DIS_USB_PVCI_CLK;
omap_writew(w, ULPD_CLOCK_CTRL);
syscon = omap_readl(OTG_SYSCON_1);
syscon |= HST_IDLE_EN|DEV_IDLE_EN|OTG_IDLE_EN;
#if IS_ENABLED(CONFIG_USB_OMAP)
if (config->otg || config->register_dev) {
struct platform_device *udc_device = config->udc_device;
int status;
syscon &= ~DEV_IDLE_EN;
udc_device->dev.platform_data = config;
status = platform_device_register(udc_device);
if (status)
pr_debug("can't register UDC device, %d\n", status);
}
#endif
#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
if (config->otg || config->register_host) {
struct platform_device *ohci_device = config->ohci_device;
int status;
syscon &= ~HST_IDLE_EN;
ohci_device->dev.platform_data = config;
status = platform_device_register(ohci_device);
if (status)
pr_debug("can't register OHCI device, %d\n", status);
}
#endif
#ifdef CONFIG_USB_OTG
if (config->otg) {
struct platform_device *otg_device = config->otg_device;
int status;
syscon &= ~OTG_IDLE_EN;
otg_device->dev.platform_data = config;
status = platform_device_register(otg_device);
if (status)
pr_debug("can't register OTG device, %d\n", status);
}
#endif
pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
omap_writel(syscon, OTG_SYSCON_1);
}
#else
static void omap_otg_init(struct omap_usb_config *config) {}
#endif
#if IS_ENABLED(CONFIG_USB_OMAP)
static struct resource udc_resources[] = {
/* order is significant! */
{ /* registers */
.start = UDC_BASE,
.end = UDC_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, { /* general IRQ */
.start = INT_USB_IRQ_GEN,
.flags = IORESOURCE_IRQ,
}, { /* PIO IRQ */
.start = INT_USB_IRQ_NISO,
.flags = IORESOURCE_IRQ,
}, { /* SOF IRQ */
.start = INT_USB_IRQ_ISO,
.flags = IORESOURCE_IRQ,
},
};
static u64 udc_dmamask = ~(u32)0;
static struct platform_device udc_device = {
.name = "omap_udc",
.id = -1,
.dev = {
.dma_mask = &udc_dmamask,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(udc_resources),
.resource = udc_resources,
};
static inline void udc_device_init(struct omap_usb_config *pdata)
{
pdata->udc_device = &udc_device;
}
#else
static inline void udc_device_init(struct omap_usb_config *pdata)
{
}
#endif
/* The dmamask must be set for OHCI to work */
static u64 ohci_dmamask = ~(u32)0;
static struct resource ohci_resources[] = {
{
.start = OMAP_OHCI_BASE,
.end = OMAP_OHCI_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
{
.start = INT_USB_IRQ_HGEN,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device ohci_device = {
.name = "ohci",
.id = -1,
.dev = {
.dma_mask = &ohci_dmamask,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(ohci_resources),
.resource = ohci_resources,
};
static inline void ohci_device_init(struct omap_usb_config *pdata)
{
if (!IS_ENABLED(CONFIG_USB_OHCI_HCD))
return;
pdata->ohci_device = &ohci_device;
pdata->ocpi_enable = &ocpi_enable;
}
#if defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG)
static struct resource otg_resources[] = {
/* order is significant! */
{
.start = OTG_BASE,
.end = OTG_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, {
.start = INT_USB_IRQ_OTG,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device otg_device = {
.name = "omap_otg",
.id = -1,
.num_resources = ARRAY_SIZE(otg_resources),
.resource = otg_resources,
};
static inline void otg_device_init(struct omap_usb_config *pdata)
{
pdata->otg_device = &otg_device;
}
#else
static inline void otg_device_init(struct omap_usb_config *pdata)
{
}
#endif
static u32 __init omap1_usb0_init(unsigned nwires, unsigned is_device)
{
u32 syscon1 = 0;
if (nwires == 0) {
if (!cpu_is_omap15xx()) {
u32 l;
/* pulldown D+/D- */
l = omap_readl(USB_TRANSCEIVER_CTRL);
l &= ~(3 << 1);
omap_writel(l, USB_TRANSCEIVER_CTRL);
}
return 0;
}
if (is_device) {
omap_cfg_reg(W4_USB_PUEN);
}
if (nwires == 2) {
u32 l;
// omap_cfg_reg(P9_USB_DP);
// omap_cfg_reg(R8_USB_DM);
if (cpu_is_omap15xx()) {
/* This works on 1510-Innovator */
return 0;
}
/* NOTES:
* - peripheral should configure VBUS detection!
* - only peripherals may use the internal D+/D- pulldowns
* - OTG support on this port not yet written
*/
l = omap_readl(USB_TRANSCEIVER_CTRL);
l &= ~(7 << 4);
if (!is_device)
l |= (3 << 1);
omap_writel(l, USB_TRANSCEIVER_CTRL);
return 3 << 16;
}
/* alternate pin config, external transceiver */
if (cpu_is_omap15xx()) {
printk(KERN_ERR "no usb0 alt pin config on 15xx\n");
return 0;
}
omap_cfg_reg(V6_USB0_TXD);
omap_cfg_reg(W9_USB0_TXEN);
omap_cfg_reg(W5_USB0_SE0);
if (nwires != 3)
omap_cfg_reg(Y5_USB0_RCV);
/* NOTE: SPEED and SUSP aren't configured here. OTG hosts
* may be able to use I2C requests to set those bits along
* with VBUS switching and overcurrent detection.
*/
if (nwires != 6) {
u32 l;
l = omap_readl(USB_TRANSCEIVER_CTRL);
l &= ~CONF_USB2_UNI_R;
omap_writel(l, USB_TRANSCEIVER_CTRL);
}
switch (nwires) {
case 3:
syscon1 = 2;
break;
case 4:
syscon1 = 1;
break;
case 6:
syscon1 = 3;
{
u32 l;
omap_cfg_reg(AA9_USB0_VP);
omap_cfg_reg(R9_USB0_VM);
l = omap_readl(USB_TRANSCEIVER_CTRL);
l |= CONF_USB2_UNI_R;
omap_writel(l, USB_TRANSCEIVER_CTRL);
}
break;
default:
printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
0, nwires);
}
return syscon1 << 16;
}
static u32 __init omap1_usb1_init(unsigned nwires)
{
u32 syscon1 = 0;
if (!cpu_is_omap15xx() && nwires != 6) {
u32 l;
l = omap_readl(USB_TRANSCEIVER_CTRL);
l &= ~CONF_USB1_UNI_R;
omap_writel(l, USB_TRANSCEIVER_CTRL);
}
if (nwires == 0)
return 0;
/* external transceiver */
omap_cfg_reg(USB1_TXD);
omap_cfg_reg(USB1_TXEN);
if (nwires != 3)
omap_cfg_reg(USB1_RCV);
if (cpu_is_omap15xx()) {
omap_cfg_reg(USB1_SEO);
omap_cfg_reg(USB1_SPEED);
// SUSP
} else if (cpu_is_omap1610() || cpu_is_omap5912()) {
omap_cfg_reg(W13_1610_USB1_SE0);
omap_cfg_reg(R13_1610_USB1_SPEED);
// SUSP
} else if (cpu_is_omap1710()) {
omap_cfg_reg(R13_1710_USB1_SE0);
// SUSP
} else {
pr_debug("usb%d cpu unrecognized\n", 1);
return 0;
}
switch (nwires) {
case 2:
goto bad;
case 3:
syscon1 = 2;
break;
case 4:
syscon1 = 1;
break;
case 6:
syscon1 = 3;
omap_cfg_reg(USB1_VP);
omap_cfg_reg(USB1_VM);
if (!cpu_is_omap15xx()) {
u32 l;
l = omap_readl(USB_TRANSCEIVER_CTRL);
l |= CONF_USB1_UNI_R;
omap_writel(l, USB_TRANSCEIVER_CTRL);
}
break;
default:
bad:
printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
1, nwires);
}
return syscon1 << 20;
}
static u32 __init omap1_usb2_init(unsigned nwires, unsigned alt_pingroup)
{
u32 syscon1 = 0;
/* NOTE omap1 erratum: must leave USB2_UNI_R set if usb0 in use */
if (alt_pingroup || nwires == 0)
return 0;
if (!cpu_is_omap15xx() && nwires != 6) {
u32 l;
l = omap_readl(USB_TRANSCEIVER_CTRL);
l &= ~CONF_USB2_UNI_R;
omap_writel(l, USB_TRANSCEIVER_CTRL);
}
/* external transceiver */
if (cpu_is_omap15xx()) {
omap_cfg_reg(USB2_TXD);
omap_cfg_reg(USB2_TXEN);
omap_cfg_reg(USB2_SEO);
if (nwires != 3)
omap_cfg_reg(USB2_RCV);
/* there is no USB2_SPEED */
} else if (cpu_is_omap16xx()) {
omap_cfg_reg(V6_USB2_TXD);
omap_cfg_reg(W9_USB2_TXEN);
omap_cfg_reg(W5_USB2_SE0);
if (nwires != 3)
omap_cfg_reg(Y5_USB2_RCV);
// FIXME omap_cfg_reg(USB2_SPEED);
} else {
pr_debug("usb%d cpu unrecognized\n", 1);
return 0;
}
// omap_cfg_reg(USB2_SUSP);
switch (nwires) {
case 2:
goto bad;
case 3:
syscon1 = 2;
break;
case 4:
syscon1 = 1;
break;
case 5:
goto bad;
case 6:
syscon1 = 3;
if (cpu_is_omap15xx()) {
omap_cfg_reg(USB2_VP);
omap_cfg_reg(USB2_VM);
} else {
u32 l;
omap_cfg_reg(AA9_USB2_VP);
omap_cfg_reg(R9_USB2_VM);
l = omap_readl(USB_TRANSCEIVER_CTRL);
l |= CONF_USB2_UNI_R;
omap_writel(l, USB_TRANSCEIVER_CTRL);
}
break;
default:
bad:
printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
2, nwires);
}
return syscon1 << 24;
}
#ifdef CONFIG_ARCH_OMAP15XX
/* OMAP-1510 OHCI has its own MMU for DMA */
#define OMAP1510_LB_MEMSIZE 32 /* Should be same as SDRAM size */
#define OMAP1510_LB_CLOCK_DIV 0xfffec10c
#define OMAP1510_LB_MMU_CTL 0xfffec208
#define OMAP1510_LB_MMU_LCK 0xfffec224
#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
#define OMAP1510_LB_MMU_CAM_H 0xfffec22c
#define OMAP1510_LB_MMU_CAM_L 0xfffec230
#define OMAP1510_LB_MMU_RAM_H 0xfffec234
#define OMAP1510_LB_MMU_RAM_L 0xfffec238
/*
* Bus address is physical address, except for OMAP-1510 Local Bus.
* OMAP-1510 bus address is translated into a Local Bus address if the
* OMAP bus type is lbus.
*/
#define OMAP1510_LB_OFFSET UL(0x30000000)
/*
* OMAP-1510 specific Local Bus clock on/off
*/
static int omap_1510_local_bus_power(int on)
{
if (on) {
omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
udelay(200);
} else {
omap_writel(0, OMAP1510_LB_MMU_CTL);
}
return 0;
}
/*
* OMAP-1510 specific Local Bus initialization
* NOTE: This assumes 32MB memory size in OMAP1510_LB_MEMSIZE.
* See also arch/mach-omap/memory.h for __virt_to_dma() and
* __dma_to_virt() which need to match with the physical
* Local Bus address below.
*/
static int omap_1510_local_bus_init(void)
{
unsigned int tlb;
unsigned long lbaddr, physaddr;
omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
OMAP1510_LB_CLOCK_DIV);
/* Configure the Local Bus MMU table */
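	/* Each TLB entry maps one 1 MB local-bus section onto physical RAM */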
for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
physaddr = tlb * 0x00100000 + PHYS_OFFSET;
omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
OMAP1510_LB_MMU_CAM_L);
omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
}
/* Enable the walking table */
omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
udelay(200);
return 0;
}
static void omap_1510_local_bus_reset(void)
{
omap_1510_local_bus_power(1);
omap_1510_local_bus_init();
}
/* ULPD_DPLL_CTRL */
#define DPLL_IOB (1 << 13)
#define DPLL_PLL_ENABLE (1 << 4)
#define DPLL_LOCK (1 << 0)
/* ULPD_APLL_CTRL */
#define APLL_NDPLL_SWITCH (1 << 0)
static void __init omap_1510_usb_init(struct omap_usb_config *config)
{
unsigned int val;
u16 w;
config->usb0_init(config->pins[0], is_usb0_device(config));
config->usb1_init(config->pins[1]);
config->usb2_init(config->pins[2], 0);
val = omap_readl(MOD_CONF_CTRL_0) & ~(0x3f << 1);
val |= (config->hmc_mode << 1);
omap_writel(val, MOD_CONF_CTRL_0);
printk("USB: hmc %d", config->hmc_mode);
if (config->pins[0])
pr_cont(", usb0 %d wires%s", config->pins[0],
is_usb0_device(config) ? " (dev)" : "");
if (config->pins[1])
pr_cont(", usb1 %d wires", config->pins[1]);
if (config->pins[2])
pr_cont(", usb2 %d wires", config->pins[2]);
pr_cont("\n");
/* use DPLL for 48 MHz function clock */
pr_debug("APLL %04x DPLL %04x REQ %04x\n", omap_readw(ULPD_APLL_CTRL),
omap_readw(ULPD_DPLL_CTRL), omap_readw(ULPD_SOFT_REQ));
w = omap_readw(ULPD_APLL_CTRL);
w &= ~APLL_NDPLL_SWITCH;
omap_writew(w, ULPD_APLL_CTRL);
w = omap_readw(ULPD_DPLL_CTRL);
w |= DPLL_IOB | DPLL_PLL_ENABLE;
omap_writew(w, ULPD_DPLL_CTRL);
w = omap_readw(ULPD_SOFT_REQ);
w |= SOFT_UDC_REQ | SOFT_DPLL_REQ;
omap_writew(w, ULPD_SOFT_REQ);
while (!(omap_readw(ULPD_DPLL_CTRL) & DPLL_LOCK))
cpu_relax();
#if IS_ENABLED(CONFIG_USB_OMAP)
if (config->register_dev) {
int status;
udc_device.dev.platform_data = config;
status = platform_device_register(&udc_device);
if (status)
pr_debug("can't register UDC device, %d\n", status);
/* udc driver gates 48MHz by D+ pullup */
}
#endif
if (IS_ENABLED(CONFIG_USB_OHCI_HCD) && config->register_host) {
int status;
ohci_device.dev.platform_data = config;
dma_direct_set_offset(&ohci_device.dev, PHYS_OFFSET,
OMAP1510_LB_OFFSET, (u64)-1);
status = platform_device_register(&ohci_device);
if (status)
pr_debug("can't register OHCI device, %d\n", status);
/* hcd explicitly gates 48MHz */
config->lb_reset = omap_1510_local_bus_reset;
}
}
#else
static inline void omap_1510_usb_init(struct omap_usb_config *config) {}
#endif
void __init omap1_usb_init(struct omap_usb_config *_pdata)
{
struct omap_usb_config *pdata;
pdata = kmemdup(_pdata, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return;
pdata->usb0_init = omap1_usb0_init;
pdata->usb1_init = omap1_usb1_init;
pdata->usb2_init = omap1_usb2_init;
udc_device_init(pdata);
ohci_device_init(pdata);
otg_device_init(pdata);
if (cpu_is_omap16xx())
omap_otg_init(pdata);
else if (cpu_is_omap15xx())
omap_1510_usb_init(pdata);
else
printk(KERN_ERR "USB: No init for your chip yet\n");
}
| linux-master | arch/arm/mach-omap1/usb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/io.c
*
* OMAP1 I/O mapping code
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/omap-dma.h>
#include <asm/tlb.h>
#include <asm/mach/map.h>
#include "tc.h"
#include "iomap.h"
#include "common.h"
/*
* The machine specific code may provide the extra mapping besides the
* default mapping provided here.
*/
static struct map_desc omap1_io_desc[] __initdata = {
{
.virtual = OMAP1_IO_VIRT,
.pfn = __phys_to_pfn(OMAP1_IO_PHYS),
.length = OMAP1_IO_SIZE,
.type = MT_DEVICE
}, {
.virtual = OMAP1_DSP_BASE,
.pfn = __phys_to_pfn(OMAP1_DSP_START),
.length = OMAP1_DSP_SIZE,
.type = MT_DEVICE
}, {
.virtual = OMAP1_DSPREG_BASE,
.pfn = __phys_to_pfn(OMAP1_DSPREG_START),
.length = OMAP1_DSPREG_SIZE,
.type = MT_DEVICE
}
};
/*
* Maps common IO regions for omap1
*/
void __init omap1_map_io(void)
{
iotable_init(omap1_io_desc, ARRAY_SIZE(omap1_io_desc));
}
/*
* Common low-level hardware init for omap1.
*/
void __init omap1_init_early(void)
{
omap_check_revision();
/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
* on a Posted Write in the TIPB Bridge".
*/
omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL);
omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL);
}
void __init omap1_init_late(void)
{
omap_serial_wakeup_init();
}
/*
* NOTE: Please use ioremap + __raw_read/write where possible instead of these
*/
u8 omap_readb(u32 pa)
{
return __raw_readb(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readb);
u16 omap_readw(u32 pa)
{
return __raw_readw(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readw);
u32 omap_readl(u32 pa)
{
return __raw_readl(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readl);
void omap_writeb(u8 v, u32 pa)
{
__raw_writeb(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writeb);
void omap_writew(u16 v, u32 pa)
{
__raw_writew(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writew);
void omap_writel(u32 v, u32 pa)
{
__raw_writel(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writel);
| linux-master | arch/arm/mach-omap1/io.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/board-sx1-mmc.c
*
* Copyright (C) 2007 Instituto Nokia de Tecnologia - INdT
* Author: Carlos Eduardo Aguiar <[email protected]>
*
* This code is based on linux/arch/arm/mach-omap1/board-h2-mmc.c, which is:
* Copyright (C) 2007 Instituto Nokia de Tecnologia - INdT
*/
#include <linux/platform_device.h>
#include "hardware.h"
#include "board-sx1.h"
#include "mmc.h"
#if IS_ENABLED(CONFIG_MMC_OMAP)
static int mmc_set_power(struct device *dev, int slot, int power_on,
int vdd)
{
int err;
u8 dat = 0;
err = sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, &dat);
if (err < 0)
return err;
if (power_on)
dat |= SOFIA_MMC_POWER;
else
dat &= ~SOFIA_MMC_POWER;
return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, dat);
}
/* Cover switch is at OMAP_MPUIO(3) */
static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.name = "mmcblk",
},
};
static struct omap_mmc_platform_data *mmc_data[OMAP15XX_NR_MMC];
void __init sx1_mmc_init(void)
{
mmc_data[0] = &mmc1_data;
omap1_init_mmc(mmc_data, OMAP15XX_NR_MMC);
}
#else
void __init sx1_mmc_init(void)
{
}
#endif
| linux-master | arch/arm/mach-omap1/board-sx1-mmc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/opp_data.c
*
* Copyright (C) 2004 - 2005 Nokia corporation
* Written by Tuukka Tikkanen <[email protected]>
* Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
*/
#include "clock.h"
#include "opp.h"
/*-------------------------------------------------------------------------
* Omap1 MPU rate table
*-------------------------------------------------------------------------*/
struct mpu_rate omap1_rate_table[] = {
/* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
* NOTE: Comment order here is different from bits in CKCTL value:
* armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
*/
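	/*
	 * Worked example: the divider fields are log2-encoded, two bits
	 * each (PERDIV at bits 0-1 up through DSPMMUDIV at bits 10-11),
	 * so CKCTL 0x050f decodes as PERDIV=3 (/8), LCDDIV=3 (/8),
	 * ARMDIV=0 (/1), DSPDIV=0 (/1), TCDIV=1 (/2), DSPMMUDIV=1 (/2),
	 * matching the "1/1/2/2/8/8" annotation on the 192 MHz entries.
	 */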
{ 216000000, 12000000, 216000000, 0x050d, 0x2910, /* 1/1/2/2/2/8 */
CK_1710 },
{ 195000000, 13000000, 195000000, 0x050e, 0x2790, /* 1/1/2/2/4/8 */
CK_7XX },
{ 192000000, 19200000, 192000000, 0x050f, 0x2510, /* 1/1/2/2/8/8 */
CK_16XX },
{ 192000000, 12000000, 192000000, 0x050f, 0x2810, /* 1/1/2/2/8/8 */
CK_16XX },
{ 96000000, 12000000, 192000000, 0x055f, 0x2810, /* 2/2/2/2/8/8 */
CK_16XX },
{ 48000000, 12000000, 192000000, 0x0baf, 0x2810, /* 4/4/4/8/8/8 */
CK_16XX },
{ 24000000, 12000000, 192000000, 0x0fff, 0x2810, /* 8/8/8/8/8/8 */
CK_16XX },
{ 182000000, 13000000, 182000000, 0x050e, 0x2710, /* 1/1/2/2/4/8 */
CK_7XX },
{ 168000000, 12000000, 168000000, 0x010f, 0x2710, /* 1/1/1/2/8/8 */
CK_16XX|CK_7XX },
{ 150000000, 12000000, 150000000, 0x010a, 0x2cb0, /* 1/1/1/2/4/4 */
CK_1510 },
{ 120000000, 12000000, 120000000, 0x010a, 0x2510, /* 1/1/1/2/4/4 */
CK_16XX|CK_1510|CK_310|CK_7XX },
{ 96000000, 12000000, 96000000, 0x0005, 0x2410, /* 1/1/1/1/2/2 */
CK_16XX|CK_1510|CK_310|CK_7XX },
{ 60000000, 12000000, 60000000, 0x0005, 0x2290, /* 1/1/1/1/2/2 */
CK_16XX|CK_1510|CK_310|CK_7XX },
{ 30000000, 12000000, 60000000, 0x0555, 0x2290, /* 2/2/2/2/2/2 */
CK_16XX|CK_1510|CK_310|CK_7XX },
{ 0, 0, 0, 0, 0 },
};
| linux-master | arch/arm/mach-omap1/opp_data.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Amstrad E3 FIQ handling
*
* Copyright (C) 2009 Janusz Krzysztofik
* Copyright (c) 2006 Matt Callow
* Copyright (c) 2004 Amstrad Plc
* Copyright (C) 2001 RidgeRun, Inc.
*
* Parts of this code are taken from linux/arch/arm/mach-omap/irq.c
* in the MontaVista 2.4 kernel (and the Amstrad changes therein)
*/
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_data/ams-delta-fiq.h>
#include <linux/platform_device.h>
#include <asm/fiq.h>
#include <linux/soc/ti/omap1-io.h>
#include "hardware.h"
#include "ams-delta-fiq.h"
#include "board-ams-delta.h"
static struct fiq_handler fh = {
.name = "ams-delta-fiq"
};
/*
* This buffer is shared between FIQ and IRQ contexts.
* The FIQ and IRQ isrs can both read and write it.
* It is structured as a header section of several 32-bit slots,
* followed by the circular buffer where the FIQ isr stores
* keystrokes received from the qwerty keyboard. See
* <linux/platform_data/ams-delta-fiq.h> for details of offsets.
*/
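/*
 * Rough layout sketch (illustrative only; the authoritative offsets
 * live in <linux/platform_data/ams-delta-fiq.h>): slots 0 up to
 * FIQ_CIRC_BUFF - 1 form the header (state, key counters, head/tail
 * offsets and the per-GPIO interrupt counters), while slots from
 * FIQ_CIRC_BUFF onwards hold the circular keystroke buffer itself.
 */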
static unsigned int fiq_buffer[1024];
static struct irq_chip *irq_chip;
static struct irq_data *irq_data[16];
static unsigned int irq_counter[16];
static const char *pin_name[16] __initconst = {
[AMS_DELTA_GPIO_PIN_KEYBRD_DATA] = "keybrd_data",
[AMS_DELTA_GPIO_PIN_KEYBRD_CLK] = "keybrd_clk",
};
static irqreturn_t deferred_fiq(int irq, void *dev_id)
{
struct irq_data *d;
int gpio, irq_num, fiq_count;
/*
* For each handled GPIO interrupt, keep calling its interrupt handler
* until the IRQ counter catches up with the FIQ-incremented interrupt counter.
*/
for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK;
gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) {
d = irq_data[gpio];
irq_num = d->irq;
fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
if (irq_counter[gpio] < fiq_count &&
gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
/*
* handle_simple_irq(), which OMAP GPIO edge
* interrupts default to since commit 80ac93c27441,
* requires the interrupt to be already acked and unmasked.
*/
if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
irq_chip->irq_unmask(d);
}
for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
generic_handle_irq(irq_num);
}
return IRQ_HANDLED;
}
void __init ams_delta_init_fiq(struct gpio_chip *chip,
struct platform_device *serio)
{
struct gpio_desc *gpiod, *data = NULL, *clk = NULL;
void *fiqhandler_start;
unsigned int fiqhandler_length;
struct pt_regs FIQ_regs;
unsigned long val, offset;
int i, retval;
/* Store irq_chip location for IRQ handler use */
irq_chip = chip->irq.chip;
if (!irq_chip) {
pr_err("%s: GPIO chip %s is missing IRQ function\n", __func__,
chip->label);
return;
}
for (i = 0; i < ARRAY_SIZE(irq_data); i++) {
gpiod = gpiochip_request_own_desc(chip, i, pin_name[i],
GPIO_ACTIVE_HIGH, GPIOD_IN);
if (IS_ERR(gpiod)) {
pr_err("%s: failed to get GPIO pin %d (%ld)\n",
__func__, i, PTR_ERR(gpiod));
return;
}
/* Store irq_data location for IRQ handler use */
irq_data[i] = irq_get_irq_data(gpiod_to_irq(gpiod));
/*
* FIQ handler takes full control over serio data and clk GPIO
* pins. Initialize them and keep them requested so nobody can
* interfere. Fail if either of those two couldn't be requested.
*/
switch (i) {
case AMS_DELTA_GPIO_PIN_KEYBRD_DATA:
data = gpiod;
gpiod_direction_input(data);
break;
case AMS_DELTA_GPIO_PIN_KEYBRD_CLK:
clk = gpiod;
gpiod_direction_input(clk);
break;
default:
gpiochip_free_own_desc(gpiod);
break;
}
}
if (!data || !clk)
goto out_gpio;
fiqhandler_start = &qwerty_fiqin_start;
fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
pr_info("Installing fiq handler from %p, length 0x%x\n",
fiqhandler_start, fiqhandler_length);
retval = claim_fiq(&fh);
if (retval) {
pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
retval);
goto out_gpio;
}
retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
IRQ_TYPE_EDGE_RISING, "deferred_fiq", NULL);
if (retval < 0) {
pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
release_fiq(&fh);
goto out_gpio;
}
/*
* Since no set_type() method is provided by the OMAP irq chip,
* switch to edge-triggered interrupt type manually.
*/
offset = IRQ_ILR0_REG_OFFSET +
((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
set_fiq_handler(fiqhandler_start, fiqhandler_length);
/*
* Initialise the buffer which is shared
* between FIQ mode and IRQ mode
*/
fiq_buffer[FIQ_GPIO_INT_MASK] = 0;
fiq_buffer[FIQ_MASK] = 0;
fiq_buffer[FIQ_STATE] = 0;
fiq_buffer[FIQ_KEY] = 0;
fiq_buffer[FIQ_KEYS_CNT] = 0;
fiq_buffer[FIQ_KEYS_HICNT] = 0;
fiq_buffer[FIQ_TAIL_OFFSET] = 0;
fiq_buffer[FIQ_HEAD_OFFSET] = 0;
fiq_buffer[FIQ_BUF_LEN] = 256;
fiq_buffer[FIQ_MISSED_KEYS] = 0;
fiq_buffer[FIQ_BUFFER_START] =
(unsigned int) &fiq_buffer[FIQ_CIRC_BUFF];
for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++)
fiq_buffer[i] = 0;
/*
* FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
* will run in an unpredictable context. The fiq_buffer is the FIQ isr's
* only means of communication with the IRQ level and other kernel
* context code.
*/
FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer;
set_fiq_regs(&FIQ_regs);
pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer);
/*
* Redirect GPIO interrupts to FIQ
*/
offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
val = omap_readl(OMAP_IH1_BASE + offset) | 1;
omap_writel(val, OMAP_IH1_BASE + offset);
/* Initialize serio device IRQ resource and platform_data */
serio->resource[0].start = gpiod_to_irq(clk);
serio->resource[0].end = serio->resource[0].start;
serio->dev.platform_data = fiq_buffer;
/*
* Since the FIQ handler performs handling of GPIO registers for
* the "keybrd_clk" IRQ pin, the ams_delta_serio driver used to set
* handle_simple_irq() as active IRQ handler for that pin to avoid
* bad interaction with gpio-omap driver. This is no longer needed
* as handle_simple_irq() is now the default handler for OMAP GPIO
* edge interrupts.
* This comment replaces the obsolete code which has been removed
* from the ams_delta_serio driver and stands here only as a reminder
* of that dependency on gpio-omap driver behavior.
*/
return;
out_gpio:
if (data)
gpiochip_free_own_desc(data);
if (clk)
gpiochip_free_own_desc(clk);
}
| linux-master | arch/arm/mach-omap1/ams-delta-fiq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP SRAM detection and management
*
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/set_memory.h>
#include <asm/fncpy.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include "soc.h"
#include "sram.h"
#define OMAP1_SRAM_PA 0x20000000
#define SRAM_BOOTLOADER_SZ 0x80
#define ROUND_DOWN(value, boundary) ((value) & (~((boundary) - 1)))
static void __iomem *omap_sram_base;
static unsigned long omap_sram_start;
static unsigned long omap_sram_skip;
static unsigned long omap_sram_size;
static void __iomem *omap_sram_ceil;
/*
* Memory allocator for SRAM: calculates the new ceiling address
* for pushing a function using the fncpy API.
*
* Note that fncpy requires the returned address to be aligned
* to an 8-byte boundary.
*/
static void *omap_sram_push_address(unsigned long size)
{
unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);
if (size > available) {
pr_err("Not enough space in SRAM\n");
return NULL;
}
new_ceil -= size;
new_ceil = ROUND_DOWN(new_ceil, FNCPY_ALIGN);
omap_sram_ceil = IOMEM(new_ceil);
return (void __force *)omap_sram_ceil;
}
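/*
 * Illustrative example (hypothetical numbers): with the ceiling at
 * base + 0x4000, pushing a 0x123 byte function first lowers the
 * ceiling to base + 0x3edd, then rounds it down to the 8-byte
 * FNCPY_ALIGN boundary at base + 0x3ed8, which is returned.
 */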
void *omap_sram_push(void *funcp, unsigned long size)
{
void *sram;
unsigned long base;
int pages;
void *dst = NULL;
sram = omap_sram_push_address(size);
if (!sram)
return NULL;
base = (unsigned long)sram & PAGE_MASK;
pages = PAGE_ALIGN(size) / PAGE_SIZE;
set_memory_rw(base, pages);
dst = fncpy(sram, funcp, size);
set_memory_rox(base, pages);
return dst;
}
/*
* The amount of SRAM depends on the core type.
* Note that we cannot try to test for SRAM here because writes
* to secure SRAM will hang the system. Also the SRAM is not
* yet mapped at this point.
* Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
*/
static void __init omap_detect_and_map_sram(void)
{
unsigned long base;
int pages;
omap_sram_skip = SRAM_BOOTLOADER_SZ;
omap_sram_start = OMAP1_SRAM_PA;
if (cpu_is_omap15xx())
omap_sram_size = 0x30000; /* 192K */
else if (cpu_is_omap1610() || cpu_is_omap1611() ||
cpu_is_omap1621() || cpu_is_omap1710())
omap_sram_size = 0x4000; /* 16K */
else {
pr_err("Could not detect SRAM size\n");
omap_sram_size = 0x4000;
}
omap_sram_start = ROUND_DOWN(omap_sram_start, PAGE_SIZE);
omap_sram_base = __arm_ioremap_exec(omap_sram_start, omap_sram_size, 1);
if (!omap_sram_base) {
pr_err("SRAM: Could not map\n");
return;
}
omap_sram_ceil = omap_sram_base + omap_sram_size;
/*
* Looks like we need to preserve some bootloader code at the
* beginning of SRAM for jumping to flash for reboot to work...
*/
memset_io(omap_sram_base + omap_sram_skip, 0,
omap_sram_size - omap_sram_skip);
base = (unsigned long)omap_sram_base;
pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
set_memory_rox(base, pages);
}
static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);
void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
{
BUG_ON(!_omap_sram_reprogram_clock);
_omap_sram_reprogram_clock(dpllctl, ckctl);
}
int __init omap1_sram_init(void)
{
omap_detect_and_map_sram();
_omap_sram_reprogram_clock =
omap_sram_push(omap1_sram_reprogram_clock,
omap1_sram_reprogram_clock_sz);
return 0;
}
| linux-master | arch/arm/mach-omap1/sram-init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP15xx specific gpio init
*
* Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
*
* Author:
* Charulatha V <[email protected]>
*/
#include <linux/platform_data/gpio-omap.h>
#include <linux/soc/ti/omap1-soc.h>
#include <asm/irq.h>
#include "irqs.h"
#define OMAP1_MPUIO_VBASE OMAP1_MPUIO_BASE
#define OMAP1510_GPIO_BASE 0xFFFCE000
/* gpio1 */
static struct resource omap15xx_mpu_gpio_resources[] = {
{
.start = OMAP1_MPUIO_VBASE,
.end = OMAP1_MPUIO_VBASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_MPUIO,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
.revision = USHRT_MAX,
.direction = OMAP_MPUIO_IO_CNTL,
.datain = OMAP_MPUIO_INPUT_LATCH,
.dataout = OMAP_MPUIO_OUTPUT,
.irqstatus = OMAP_MPUIO_GPIO_INT,
.irqenable = OMAP_MPUIO_GPIO_MASKIT,
.irqenable_inv = true,
.irqctrl = OMAP_MPUIO_GPIO_INT_EDGE,
};
static struct omap_gpio_platform_data omap15xx_mpu_gpio_config = {
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
.regs = &omap15xx_mpuio_regs,
};
static struct platform_device omap15xx_mpu_gpio = {
.name = "omap_gpio",
.id = 0,
.dev = {
.platform_data = &omap15xx_mpu_gpio_config,
},
.num_resources = ARRAY_SIZE(omap15xx_mpu_gpio_resources),
.resource = omap15xx_mpu_gpio_resources,
};
/* gpio2 */
static struct resource omap15xx_gpio_resources[] = {
{
.start = OMAP1510_GPIO_BASE,
.end = OMAP1510_GPIO_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_GPIO_BANK1,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
.revision = USHRT_MAX,
.direction = OMAP1510_GPIO_DIR_CONTROL,
.datain = OMAP1510_GPIO_DATA_INPUT,
.dataout = OMAP1510_GPIO_DATA_OUTPUT,
.irqstatus = OMAP1510_GPIO_INT_STATUS,
.irqenable = OMAP1510_GPIO_INT_MASK,
.irqenable_inv = true,
.irqctrl = OMAP1510_GPIO_INT_CONTROL,
.pinctrl = OMAP1510_GPIO_PIN_CONTROL,
};
static struct omap_gpio_platform_data omap15xx_gpio_config = {
.bank_width = 16,
.regs = &omap15xx_gpio_regs,
};
static struct platform_device omap15xx_gpio = {
.name = "omap_gpio",
.id = 1,
.dev = {
.platform_data = &omap15xx_gpio_config,
},
.num_resources = ARRAY_SIZE(omap15xx_gpio_resources),
.resource = omap15xx_gpio_resources,
};
/*
* omap15xx_gpio_init needs to be done before
* machine_init functions access gpio APIs.
* Hence omap15xx_gpio_init is a postcore_initcall.
*/
static int __init omap15xx_gpio_init(void)
{
if (!cpu_is_omap15xx())
return -EINVAL;
platform_device_register(&omap15xx_mpu_gpio);
platform_device_register(&omap15xx_gpio);
return 0;
}
postcore_initcall(omap15xx_gpio_init);
| linux-master | arch/arm/mach-omap1/gpio15xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/plat-omap/dma.c
*
* Copyright (C) 2003 - 2008 Nokia Corporation
* Author: Juha Yrjölä <[email protected]>
* DMA channel linking for 1610 by Samuel Ortiz <[email protected]>
* Graphics DMA and LCD DMA graphics transformations
* by Imre Deak <[email protected]>
* OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
* Merged to support both OMAP1 and OMAP2 by Tony Lindgren <[email protected]>
* Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
*
* Copyright (C) 2009 Texas Instruments
* Added OMAP4 support - Santosh Shilimkar <[email protected]>
*
* Support functions for the OMAP internal DMA channels.
*
* Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
* Converted DMA library into DMA platform driver.
* - G, Manjunath Kondaiah <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/omap-dma.h>
#include <linux/soc/ti/omap1-io.h>
#include <linux/soc/ti/omap1-soc.h>
#include "tc.h"
/*
* MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
* channels that an instance of the SDMA IP block can support. Used
* to size arrays. (The actual maximum on a particular SoC may be less
* than this -- for example, OMAP1 SDMA instances only support 17 logical
* DMA channels.)
*/
#define MAX_LOGICAL_DMA_CH_COUNT 32
#undef DEBUG
#define OMAP_DMA_ACTIVE 0x01
#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
static struct omap_system_dma_plat_info *p;
static struct omap_dma_dev_attr *d;
static int enable_1510_mode;
static u32 errata;
struct dma_link_info {
int *linked_dmach_q;
int no_of_lchs_linked;
int q_count;
int q_tail;
int q_head;
int chain_state;
int chain_mode;
};
static int dma_lch_count;
static int dma_chan_count;
static int omap_dma_reserve_channels;
static DEFINE_SPINLOCK(dma_chan_lock);
static struct omap_dma_lch *dma_chan;
static inline void omap_disable_channel_irq(int lch)
{
/* disable channel interrupts */
p->dma_write(0, CICR, lch);
/* Clear CSR */
p->dma_read(CSR, lch);
}
static inline void set_gdma_dev(int req, int dev)
{
u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
int shift = ((req - 1) % 5) * 6;
u32 l;
l = omap_readl(reg);
l &= ~(0x3f << shift);
l |= (dev - 1) << shift;
omap_writel(l, reg);
}
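/*
 * Illustrative example (derived from the arithmetic above): req = 7
 * selects the second mux register, OMAP_FUNC_MUX_ARM_BASE + 4, and
 * bit offset ((7 - 1) % 5) * 6 = 6, i.e. the 6-bit field at bits 11..6.
 */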
#if IS_ENABLED(CONFIG_FB_OMAP)
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
unsigned long reg;
u32 l;
if (dma_omap1()) {
switch (dst_port) {
case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */
reg = OMAP_TC_OCPT1_PRIOR;
break;
case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */
reg = OMAP_TC_OCPT2_PRIOR;
break;
case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */
reg = OMAP_TC_EMIFF_PRIOR;
break;
case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */
reg = OMAP_TC_EMIFS_PRIOR;
break;
default:
BUG();
return;
}
l = omap_readl(reg);
l &= ~(0xf << 8);
l |= (priority & 0xf) << 8;
omap_writel(l, reg);
}
}
EXPORT_SYMBOL(omap_set_dma_priority);
#endif
#if IS_ENABLED(CONFIG_USB_OMAP)
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
return enable_1510_mode;
}
#else
#define omap_dma_in_1510_mode() 0
#endif
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
int frame_count, int sync_mode,
int dma_trigger, int src_or_dst_synch)
{
u32 l;
u16 ccr;
l = p->dma_read(CSDP, lch);
l &= ~0x03;
l |= data_type;
p->dma_write(l, CSDP, lch);
ccr = p->dma_read(CCR, lch);
ccr &= ~(1 << 5);
if (sync_mode == OMAP_DMA_SYNC_FRAME)
ccr |= 1 << 5;
p->dma_write(ccr, CCR, lch);
ccr = p->dma_read(CCR2, lch);
ccr &= ~(1 << 2);
if (sync_mode == OMAP_DMA_SYNC_BLOCK)
ccr |= 1 << 2;
p->dma_write(ccr, CCR2, lch);
p->dma_write(elem_count, CEN, lch);
p->dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
{
if (!dma_omap15xx()) {
u32 l;
l = p->dma_read(LCH_CTRL, lch);
l &= ~0x7;
l |= mode;
p->dma_write(l, LCH_CTRL, lch);
}
}
EXPORT_SYMBOL(omap_set_dma_channel_mode);
/* Note that src_port is only for omap1 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
unsigned long src_start,
int src_ei, int src_fi)
{
u32 l;
u16 w;
w = p->dma_read(CSDP, lch);
w &= ~(0x1f << 2);
w |= src_port << 2;
p->dma_write(w, CSDP, lch);
l = p->dma_read(CCR, lch);
l &= ~(0x03 << 12);
l |= src_amode << 12;
p->dma_write(l, CCR, lch);
p->dma_write(src_start, CSSA, lch);
p->dma_write(src_ei, CSEI, lch);
p->dma_write(src_fi, CSFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_params);
void omap_set_dma_src_data_pack(int lch, int enable)
{
u32 l;
l = p->dma_read(CSDP, lch);
l &= ~(1 << 6);
if (enable)
l |= (1 << 6);
p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_data_pack);
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
unsigned int burst = 0;
u32 l;
l = p->dma_read(CSDP, lch);
l &= ~(0x03 << 7);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
burst = 0x2;
break;
case OMAP_DMA_DATA_BURST_8:
/*
* not supported by current hardware on OMAP1
* w |= (0x03 << 7);
*/
fallthrough;
case OMAP_DMA_DATA_BURST_16:
/* OMAP1 doesn't support burst 16 */
fallthrough;
default:
BUG();
}
l |= (burst << 7);
p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
/* Note that dest_port is only for OMAP1 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
unsigned long dest_start,
int dst_ei, int dst_fi)
{
u32 l;
l = p->dma_read(CSDP, lch);
l &= ~(0x1f << 9);
l |= dest_port << 9;
p->dma_write(l, CSDP, lch);
l = p->dma_read(CCR, lch);
l &= ~(0x03 << 14);
l |= dest_amode << 14;
p->dma_write(l, CCR, lch);
p->dma_write(dest_start, CDSA, lch);
p->dma_write(dst_ei, CDEI, lch);
p->dma_write(dst_fi, CDFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_params);
void omap_set_dma_dest_data_pack(int lch, int enable)
{
u32 l;
l = p->dma_read(CSDP, lch);
l &= ~(1 << 13);
if (enable)
l |= 1 << 13;
p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
unsigned int burst = 0;
u32 l;
l = p->dma_read(CSDP, lch);
l &= ~(0x03 << 14);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
burst = 0x2;
break;
case OMAP_DMA_DATA_BURST_8:
burst = 0x3;
break;
case OMAP_DMA_DATA_BURST_16:
/* OMAP1 doesn't support burst 16 */
fallthrough;
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
return;
}
l |= (burst << 14);
p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
static inline void omap_enable_channel_irq(int lch)
{
/* Clear CSR */
p->dma_read(CSR, lch);
/* Enable some nice interrupts. */
p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
void omap_disable_dma_irq(int lch, u16 bits)
{
dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);
static inline void enable_lnk(int lch)
{
u32 l;
l = p->dma_read(CLNK_CTRL, lch);
l &= ~(1 << 14);
/* Set the ENABLE_LNK bits */
if (dma_chan[lch].next_lch != -1)
l = dma_chan[lch].next_lch | (1 << 15);
p->dma_write(l, CLNK_CTRL, lch);
}
static inline void disable_lnk(int lch)
{
u32 l;
l = p->dma_read(CLNK_CTRL, lch);
/* Disable interrupts */
omap_disable_channel_irq(lch);
/* Set the STOP_LNK bit */
l |= 1 << 14;
p->dma_write(l, CLNK_CTRL, lch);
dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
#endif
int omap_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch_out)
{
int ch, free_ch = -1;
unsigned long flags;
struct omap_dma_lch *chan;
WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");
spin_lock_irqsave(&dma_chan_lock, flags);
for (ch = 0; ch < dma_chan_count; ch++) {
if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
free_ch = ch;
/* Exit after first free channel found */
break;
}
}
if (free_ch == -1) {
spin_unlock_irqrestore(&dma_chan_lock, flags);
return -EBUSY;
}
chan = dma_chan + free_ch;
chan->dev_id = dev_id;
if (p->clear_lch_regs)
p->clear_lch_regs(free_ch);
spin_unlock_irqrestore(&dma_chan_lock, flags);
chan->dev_name = dev_name;
chan->callback = callback;
chan->data = data;
chan->flags = 0;
chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
if (dma_omap16xx()) {
/* If the sync device is set, configure it dynamically. */
if (dev_id != 0) {
set_gdma_dev(free_ch + 1, dev_id);
dev_id = free_ch + 1;
}
/*
* Disable the 1510 compatibility mode and set the sync device
* id.
*/
p->dma_write(dev_id | (1 << 10), CCR, free_ch);
} else {
p->dma_write(dev_id, CCR, free_ch);
}
*dma_ch_out = free_ch;
return 0;
}
EXPORT_SYMBOL(omap_request_dma);
void omap_free_dma(int lch)
{
unsigned long flags;
if (dma_chan[lch].dev_id == -1) {
pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
lch);
return;
}
/* Disable all DMA interrupts for the channel. */
omap_disable_channel_irq(lch);
/* Make sure the DMA transfer is stopped. */
p->dma_write(0, CCR, lch);
spin_lock_irqsave(&dma_chan_lock, flags);
dma_chan[lch].dev_id = -1;
dma_chan[lch].next_lch = -1;
dma_chan[lch].callback = NULL;
spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
/*
* Clears any DMA state so the DMA engine is ready to restart with new buffers
* through omap_start_dma(). Any buffers in flight are discarded.
*/
static void omap_clear_dma(int lch)
{
unsigned long flags;
local_irq_save(flags);
p->clear_dma(lch);
local_irq_restore(flags);
}
#if IS_ENABLED(CONFIG_USB_OMAP)
void omap_start_dma(int lch)
{
u32 l;
/*
* The CPC/CDAC register needs to be initialized to zero
* before starting a DMA transfer.
*/
if (dma_omap15xx())
p->dma_write(0, CPC, lch);
else
p->dma_write(0, CDAC, lch);
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch;
char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
/* Set the link register of the first channel */
enable_lnk(lch);
memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
dma_chan_link_map[lch] = 1;
cur_lch = dma_chan[lch].next_lch;
do {
next_lch = dma_chan[cur_lch].next_lch;
/* The loop case: we've been here already */
if (dma_chan_link_map[cur_lch])
break;
/* Mark the current channel */
dma_chan_link_map[cur_lch] = 1;
enable_lnk(cur_lch);
omap_enable_channel_irq(cur_lch);
cur_lch = next_lch;
} while (next_lch != -1);
} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
p->dma_write(lch, CLNK_CTRL, lch);
omap_enable_channel_irq(lch);
l = p->dma_read(CCR, lch);
if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
l |= OMAP_DMA_CCR_EN;
/*
* As dma_write() uses IO accessors which are weakly ordered, there
* is no guarantee that data in coherent DMA memory will be visible
* to the DMA device. Add a memory barrier here to ensure that any
* such data is visible prior to enabling DMA.
*/
mb();
p->dma_write(l, CCR, lch);
dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_start_dma);
void omap_stop_dma(int lch)
{
u32 l;
/* Disable all interrupts on the channel */
omap_disable_channel_irq(lch);
l = p->dma_read(CCR, lch);
if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
int i = 0;
u32 sys_cf;
/* Configure No-Standby */
l = p->dma_read(OCP_SYSCONFIG, lch);
sys_cf = l;
l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
p->dma_write(l, OCP_SYSCONFIG, 0);
l = p->dma_read(CCR, lch);
l &= ~OMAP_DMA_CCR_EN;
p->dma_write(l, CCR, lch);
/* Wait for sDMA FIFO drain */
l = p->dma_read(CCR, lch);
while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
OMAP_DMA_CCR_WR_ACTIVE))) {
udelay(5);
i++;
l = p->dma_read(CCR, lch);
}
if (i >= 100)
pr_err("DMA drain did not complete on lch %d\n", lch);
/* Restore OCP_SYSCONFIG */
p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
} else {
l &= ~OMAP_DMA_CCR_EN;
p->dma_write(l, CCR, lch);
}
/*
* Ensure that data transferred by DMA is visible to any access
* after DMA has been disabled. This is important for coherent
* DMA regions.
*/
mb();
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch = lch;
char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
do {
/* The loop case: we've been here already */
if (dma_chan_link_map[cur_lch])
break;
/* Mark the current channel */
dma_chan_link_map[cur_lch] = 1;
disable_lnk(cur_lch);
next_lch = dma_chan[cur_lch].next_lch;
cur_lch = next_lch;
} while (next_lch != -1);
}
dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
/*
* Returns current physical source address for the given DMA channel.
* If the channel is running the caller must disable interrupts prior calling
* this function and process the returned value before re-enabling interrupt to
* prevent races with the interrupt handler. Note that in continuous mode there
* is a chance for CSSA_L register overflow between the two reads resulting
* in incorrect return value.
*/
dma_addr_t omap_get_dma_src_pos(int lch)
{
dma_addr_t offset = 0;
if (dma_omap15xx())
offset = p->dma_read(CPC, lch);
else
offset = p->dma_read(CSAC, lch);
if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
offset = p->dma_read(CSAC, lch);
if (!dma_omap15xx()) {
/*
* CDAC == 0 indicates that the DMA transfer on the channel has
* not been started (no data has been transferred so far).
* Return the programmed source start address in this case.
*/
if (likely(p->dma_read(CDAC, lch)))
offset = p->dma_read(CSAC, lch);
else
offset = p->dma_read(CSSA, lch);
}
offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
return offset;
}
EXPORT_SYMBOL(omap_get_dma_src_pos);
/*
* Returns current physical destination address for the given DMA channel.
* If the channel is running the caller must disable interrupts prior calling
* this function and process the returned value before re-enabling interrupt to
* prevent races with the interrupt handler. Note that in continuous mode there
* is a chance for CDSA_L register overflow between the two reads resulting
* in incorrect return value.
*/
dma_addr_t omap_get_dma_dst_pos(int lch)
{
dma_addr_t offset = 0;
if (dma_omap15xx())
offset = p->dma_read(CPC, lch);
else
offset = p->dma_read(CDAC, lch);
/*
* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
* read before the DMA controller finished disabling the channel.
*/
if (!dma_omap15xx() && offset == 0) {
offset = p->dma_read(CDAC, lch);
/*
* CDAC == 0 indicates that the DMA transfer on the channel has
* not been started (no data has been transferred so far).
* Return the programmed destination start address in this case.
*/
if (unlikely(!offset))
offset = p->dma_read(CDSA, lch);
}
offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
return offset;
}
EXPORT_SYMBOL(omap_get_dma_dst_pos);
int omap_get_dma_active_status(int lch)
{
return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
}
EXPORT_SYMBOL(omap_get_dma_active_status);
#endif
int omap_dma_running(void)
{
int lch;
if (omap_lcd_dma_running())
return 1;
for (lch = 0; lch < dma_chan_count; lch++)
if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
return 1;
return 0;
}
/*----------------------------------------------------------------------------*/
static int omap1_dma_handle_ch(int ch)
{
u32 csr;
if (enable_1510_mode && ch >= 6) {
csr = dma_chan[ch].saved_csr;
dma_chan[ch].saved_csr = 0;
} else
csr = p->dma_read(CSR, ch);
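/*
 * Interpretation (inferred from the saved_csr handling above): in
 * 1510 mode the status bits of logical channels 6..8 appear in the
 * upper half of the CSR of channels 0..2, so they are stashed here
 * for a later omap1_dma_handle_ch() pass on the high channel.
 */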
if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
dma_chan[ch + 6].saved_csr = csr >> 7;
csr &= 0x7f;
}
if ((csr & 0x3f) == 0)
return 0;
if (unlikely(dma_chan[ch].dev_id == -1)) {
pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
ch, csr);
return 0;
}
if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
if (unlikely(csr & OMAP_DMA_DROP_IRQ))
pr_warn("DMA synchronization event drop occurred with device %d\n",
dma_chan[ch].dev_id);
if (likely(csr & OMAP_DMA_BLOCK_IRQ))
dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
if (likely(dma_chan[ch].callback != NULL))
dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
return 1;
}
static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
{
int ch = ((int) dev_id) - 1;
int handled = 0;
for (;;) {
int handled_now = 0;
handled_now += omap1_dma_handle_ch(ch);
if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
handled_now += omap1_dma_handle_ch(ch + 6);
if (!handled_now)
break;
handled += handled_now;
}
return handled ? IRQ_HANDLED : IRQ_NONE;
}
struct omap_system_dma_plat_info *omap_get_plat_info(void)
{
return p;
}
EXPORT_SYMBOL_GPL(omap_get_plat_info);
static int omap_system_dma_probe(struct platform_device *pdev)
{
int ch, ret = 0;
int dma_irq;
char irq_name[4];
p = pdev->dev.platform_data;
if (!p) {
dev_err(&pdev->dev,
"%s: System DMA initialized without platform data\n",
__func__);
return -EINVAL;
}
d = p->dma_attr;
errata = p->errata;
if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
&& (omap_dma_reserve_channels < d->lch_count))
d->lch_count = omap_dma_reserve_channels;
dma_lch_count = d->lch_count;
dma_chan_count = dma_lch_count;
enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
sizeof(*dma_chan), GFP_KERNEL);
if (!dma_chan)
return -ENOMEM;
for (ch = 0; ch < dma_chan_count; ch++) {
omap_clear_dma(ch);
dma_chan[ch].dev_id = -1;
dma_chan[ch].next_lch = -1;
if (ch >= 6 && enable_1510_mode)
continue;
/*
* request_irq() doesn't like dev_id (i.e. ch) being
* zero, so we have to kludge around this.
*/
sprintf(&irq_name[0], "%d", ch);
dma_irq = platform_get_irq_byname(pdev, irq_name);
if (dma_irq < 0) {
ret = dma_irq;
goto exit_dma_irq_fail;
}
/* INT_DMA_LCD is handled in lcd_dma.c */
if (dma_irq == INT_DMA_LCD)
continue;
ret = request_irq(dma_irq,
omap1_dma_irq_handler, 0, "DMA",
(void *) (ch + 1));
if (ret != 0)
goto exit_dma_irq_fail;
}
/* reserve dma channels 0 and 1 in high security devices on 34xx */
if (d->dev_caps & HS_CHANNELS_RESERVED) {
pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
dma_chan[0].dev_id = 0;
dma_chan[1].dev_id = 1;
}
p->show_dma_caps();
return 0;
exit_dma_irq_fail:
return ret;
}
static void omap_system_dma_remove(struct platform_device *pdev)
{
int dma_irq, irq_rel = 0;
for ( ; irq_rel < dma_chan_count; irq_rel++) {
dma_irq = platform_get_irq(pdev, irq_rel);
free_irq(dma_irq, (void *)(irq_rel + 1));
}
}
static struct platform_driver omap_system_dma_driver = {
.probe = omap_system_dma_probe,
.remove_new = omap_system_dma_remove,
.driver = {
.name = "omap_dma_system"
},
};
static int __init omap_system_dma_init(void)
{
return platform_driver_register(&omap_system_dma_driver);
}
arch_initcall(omap_system_dma_init);
static void __exit omap_system_dma_exit(void)
{
platform_driver_unregister(&omap_system_dma_driver);
}
MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
/*
* Reserve the omap SDMA channels using cmdline bootarg
* "omap_dma_reserve_ch=". The valid range is 1 to 32
*/
static int __init omap_dma_cmdline_reserve_ch(char *str)
{
if (get_option(&str, &omap_dma_reserve_channels) != 1)
omap_dma_reserve_channels = 0;
return 1;
}
__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
| linux-master | arch/arm/mach-omap1/omap-dma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP16xx specific gpio init
*
* Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
*
* Author:
* Charulatha V <[email protected]>
*/
#include <linux/platform_data/gpio-omap.h>
#include <linux/soc/ti/omap1-io.h>
#include "hardware.h"
#include "irqs.h"
#include "soc.h"
#define OMAP1610_GPIO1_BASE 0xfffbe400
#define OMAP1610_GPIO2_BASE 0xfffbec00
#define OMAP1610_GPIO3_BASE 0xfffbb400
#define OMAP1610_GPIO4_BASE 0xfffbbc00
#define OMAP1_MPUIO_VBASE OMAP1_MPUIO_BASE
/* smart idle, enable wakeup */
#define SYSCONFIG_WORD 0x14
/* mpu gpio */
static struct resource omap16xx_mpu_gpio_resources[] = {
{
.start = OMAP1_MPUIO_VBASE,
.end = OMAP1_MPUIO_VBASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_MPUIO,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_reg_offs omap16xx_mpuio_regs = {
.revision = USHRT_MAX,
.direction = OMAP_MPUIO_IO_CNTL,
.datain = OMAP_MPUIO_INPUT_LATCH,
.dataout = OMAP_MPUIO_OUTPUT,
.irqstatus = OMAP_MPUIO_GPIO_INT,
.irqenable = OMAP_MPUIO_GPIO_MASKIT,
.irqenable_inv = true,
.irqctrl = OMAP_MPUIO_GPIO_INT_EDGE,
};
static struct omap_gpio_platform_data omap16xx_mpu_gpio_config = {
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
.regs = &omap16xx_mpuio_regs,
};
static struct platform_device omap16xx_mpu_gpio = {
.name = "omap_gpio",
.id = 0,
.dev = {
.platform_data = &omap16xx_mpu_gpio_config,
},
.num_resources = ARRAY_SIZE(omap16xx_mpu_gpio_resources),
.resource = omap16xx_mpu_gpio_resources,
};
/* gpio1 */
static struct resource omap16xx_gpio1_resources[] = {
{
.start = OMAP1610_GPIO1_BASE,
.end = OMAP1610_GPIO1_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_GPIO_BANK1,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_reg_offs omap16xx_gpio_regs = {
.revision = OMAP1610_GPIO_REVISION,
.direction = OMAP1610_GPIO_DIRECTION,
.set_dataout = OMAP1610_GPIO_SET_DATAOUT,
.clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT,
.datain = OMAP1610_GPIO_DATAIN,
.dataout = OMAP1610_GPIO_DATAOUT,
.irqstatus = OMAP1610_GPIO_IRQSTATUS1,
.irqenable = OMAP1610_GPIO_IRQENABLE1,
.set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1,
.clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1,
.wkup_en = OMAP1610_GPIO_WAKEUPENABLE,
.edgectrl1 = OMAP1610_GPIO_EDGE_CTRL1,
.edgectrl2 = OMAP1610_GPIO_EDGE_CTRL2,
};
static struct omap_gpio_platform_data omap16xx_gpio1_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio1 = {
.name = "omap_gpio",
.id = 1,
.dev = {
.platform_data = &omap16xx_gpio1_config,
},
.num_resources = ARRAY_SIZE(omap16xx_gpio1_resources),
.resource = omap16xx_gpio1_resources,
};
/* gpio2 */
static struct resource omap16xx_gpio2_resources[] = {
{
.start = OMAP1610_GPIO2_BASE,
.end = OMAP1610_GPIO2_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_1610_GPIO_BANK2,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_platform_data omap16xx_gpio2_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio2 = {
.name = "omap_gpio",
.id = 2,
.dev = {
.platform_data = &omap16xx_gpio2_config,
},
.num_resources = ARRAY_SIZE(omap16xx_gpio2_resources),
.resource = omap16xx_gpio2_resources,
};
/* gpio3 */
static struct resource omap16xx_gpio3_resources[] = {
{
.start = OMAP1610_GPIO3_BASE,
.end = OMAP1610_GPIO3_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_1610_GPIO_BANK3,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_platform_data omap16xx_gpio3_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio3 = {
.name = "omap_gpio",
.id = 3,
.dev = {
.platform_data = &omap16xx_gpio3_config,
},
.num_resources = ARRAY_SIZE(omap16xx_gpio3_resources),
.resource = omap16xx_gpio3_resources,
};
/* gpio4 */
static struct resource omap16xx_gpio4_resources[] = {
{
.start = OMAP1610_GPIO4_BASE,
.end = OMAP1610_GPIO4_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_1610_GPIO_BANK4,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_platform_data omap16xx_gpio4_config = {
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio4 = {
.name = "omap_gpio",
.id = 4,
.dev = {
.platform_data = &omap16xx_gpio4_config,
},
.num_resources = ARRAY_SIZE(omap16xx_gpio4_resources),
.resource = omap16xx_gpio4_resources,
};
static struct platform_device *omap16xx_gpio_dev[] __initdata = {
&omap16xx_mpu_gpio,
&omap16xx_gpio1,
&omap16xx_gpio2,
&omap16xx_gpio3,
&omap16xx_gpio4,
};
/*
* omap16xx_gpio_init needs to be done before
* machine_init functions access gpio APIs.
* Hence omap16xx_gpio_init is a postcore_initcall.
*/
static int __init omap16xx_gpio_init(void)
{
int i;
void __iomem *base;
struct resource *res;
struct platform_device *pdev;
struct omap_gpio_platform_data *pdata;
if (!cpu_is_omap16xx())
return -EINVAL;
/*
* Enable system clock for GPIO module.
* The CAM_CLK_CTRL *is* really the right place.
*/
omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
ULPD_CAM_CLK_CTRL);
for (i = 0; i < ARRAY_SIZE(omap16xx_gpio_dev); i++) {
pdev = omap16xx_gpio_dev[i];
pdata = pdev->dev.platform_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!res)) {
dev_err(&pdev->dev, "Invalid mem resource.\n");
return -ENODEV;
}
base = ioremap(res->start, resource_size(res));
if (unlikely(!base)) {
dev_err(&pdev->dev, "ioremap failed.\n");
return -ENOMEM;
}
__raw_writel(SYSCONFIG_WORD, base + OMAP1610_GPIO_SYSCONFIG);
iounmap(base);
platform_device_register(omap16xx_gpio_dev[i]);
}
return 0;
}
postcore_initcall(omap16xx_gpio_init);
| linux-master | arch/arm/mach-omap1/gpio16xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Helper module for board specific I2C bus registration
*
* Copyright (C) 2009 Nokia Corporation.
*/
#include <linux/i2c.h>
#include <linux/platform_data/i2c-omap.h>
#include "mux.h"
#include "soc.h"
#include "i2c.h"
#define OMAP_I2C_SIZE 0x3f
#define OMAP1_I2C_BASE 0xfffb3800
static const char name[] = "omap_i2c";
static struct resource i2c_resources[2] = {
};
static struct platform_device omap_i2c_devices[1] = {
};
static void __init omap1_i2c_mux_pins(int bus_id)
{
omap_cfg_reg(I2C_SDA);
omap_cfg_reg(I2C_SCL);
}
int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *pdata,
int bus_id)
{
struct platform_device *pdev;
struct resource *res;
if (bus_id > 1)
return -EINVAL;
omap1_i2c_mux_pins(bus_id);
pdev = &omap_i2c_devices[bus_id - 1];
pdev->id = bus_id;
pdev->name = name;
pdev->num_resources = ARRAY_SIZE(i2c_resources);
res = i2c_resources;
res[0].start = OMAP1_I2C_BASE;
res[0].end = res[0].start + OMAP_I2C_SIZE;
res[0].flags = IORESOURCE_MEM;
res[1].start = INT_I2C;
res[1].flags = IORESOURCE_IRQ;
pdev->resource = res;
/* all OMAP1 have IP version 1 register set */
pdata->rev = OMAP_I2C_IP_VERSION_1;
/* all OMAP1 I2C are implemented like this */
pdata->flags = OMAP_I2C_FLAG_NO_FIFO |
OMAP_I2C_FLAG_SIMPLE_CLOCK |
OMAP_I2C_FLAG_16BIT_DATA_REG |
OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK;
/* how the cpu bus is wired up differs for 7xx only */
pdata->flags |= OMAP_I2C_FLAG_BUS_SHIFT_2;
pdev->dev.platform_data = pdata;
return platform_device_register(pdev);
}
#define OMAP_I2C_MAX_CONTROLLERS 4
static struct omap_i2c_bus_platform_data i2c_pdata[OMAP_I2C_MAX_CONTROLLERS];
#define OMAP_I2C_CMDLINE_SETUP (BIT(31))
/**
* omap_i2c_bus_setup - Process command line options for the I2C bus speed
* @str: String of options
*
* This function allows overriding the default I2C bus speed for a given
* I2C bus with a command line option.
*
* Format: i2c_bus=bus_id,clkrate (in kHz)
*
* Returns 1 on success, 0 otherwise.
*/
static int __init omap_i2c_bus_setup(char *str)
{
int ints[3];
get_options(str, 3, ints);
if (ints[0] < 2 || ints[1] < 1 ||
ints[1] > OMAP_I2C_MAX_CONTROLLERS)
return 0;
i2c_pdata[ints[1] - 1].clkrate = ints[2];
i2c_pdata[ints[1] - 1].clkrate |= OMAP_I2C_CMDLINE_SETUP;
return 1;
}
__setup("i2c_bus=", omap_i2c_bus_setup);
/*
* Register busses defined in command line but that are not registered with
* omap_register_i2c_bus from board initialization code.
*/
int __init omap_register_i2c_bus_cmdline(void)
{
int i, err = 0;
for (i = 0; i < ARRAY_SIZE(i2c_pdata); i++)
if (i2c_pdata[i].clkrate & OMAP_I2C_CMDLINE_SETUP) {
i2c_pdata[i].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
err = omap_i2c_add_bus(&i2c_pdata[i], i + 1);
if (err)
goto out;
}
out:
return err;
}
/**
* omap_register_i2c_bus - register I2C bus with device descriptors
* @bus_id: bus id counting from number 1
* @clkrate: clock rate of the bus in kHz
* @info: pointer into I2C device descriptor table or NULL
* @len: number of descriptors in the table
*
* Returns 0 on success or an error code.
*/
int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
struct i2c_board_info const *info,
unsigned len)
{
int err;
BUG_ON(bus_id < 1 || bus_id > OMAP_I2C_MAX_CONTROLLERS);
if (info) {
err = i2c_register_board_info(bus_id, info, len);
if (err)
return err;
}
if (!i2c_pdata[bus_id - 1].clkrate)
i2c_pdata[bus_id - 1].clkrate = clkrate;
i2c_pdata[bus_id - 1].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
return omap_i2c_add_bus(&i2c_pdata[bus_id - 1], bus_id);
}
static int __init omap_i2c_cmdline(void)
{
return omap_register_i2c_bus_cmdline();
}
subsys_initcall(omap_i2c_cmdline);
| linux-master | arch/arm/mach-omap1/i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/clock.c
*
* Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
* Written by Tuukka Tikkanen <[email protected]>
*
* Modified to use omap shared clock framework by
* Tony Lindgren <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/soc/ti/omap1-io.h>
#include <linux/spinlock.h>
#include <asm/mach-types.h>
#include "hardware.h"
#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"
__u32 arm_idlect1_mask;
/* provide direct internal access (not via clk API) to some clocks */
struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
/* protect registers shared among clk_enable/disable() and clk_set_rate() operations */
static DEFINE_SPINLOCK(arm_ckctl_lock);
static DEFINE_SPINLOCK(arm_idlect2_lock);
static DEFINE_SPINLOCK(mod_conf_ctrl_0_lock);
static DEFINE_SPINLOCK(mod_conf_ctrl_1_lock);
static DEFINE_SPINLOCK(swd_clk_div_ctrl_sel_lock);
/*
* Omap1 specific clock functions
*/
unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
unsigned int val = __raw_readl(clk->enable_reg);
return val & (1 << clk->enable_bit) ? 48000000 : 12000000;
}
unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
u32 div = omap_readl(MOD_CONF_CTRL_1);
div = (div >> 17) & 0x7;
div++;
return p_rate / div;
}
static void omap1_clk_allow_idle(struct omap1_clk *clk)
{
struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;
if (!(clk->flags & CLOCK_IDLE_CONTROL))
return;
if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
arm_idlect1_mask |= 1 << iclk->idlect_shift;
}
static void omap1_clk_deny_idle(struct omap1_clk *clk)
{
struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;
if (!(clk->flags & CLOCK_IDLE_CONTROL))
return;
if (iclk->no_idle_count++ == 0)
arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}
static __u16 verify_ckctl_value(__u16 newval)
{
/* This function checks for the following limitations set
* by the hardware (all conditions must be true):
* DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
* ARM_CK >= TC_CK
* DSP_CK >= TC_CK
* DSPMMU_CK >= TC_CK
*
* In addition, the following rules are enforced:
* LCD_CK <= TC_CK
* ARMPER_CK <= TC_CK
*
* However, maximum frequencies are not checked for!
*/
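/*
 * Illustrative example: a value encoding arm_exp = 3 (ARM_CK = DPLL/8)
 * together with tc_exp = 2 (TC_CK = DPLL/4) would leave TC_CK faster
 * than ARM_CK, so tc_exp is raised to 3, which in turn drags lcd_exp
 * and per_exp up to at least 3 as well.
 */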
__u8 per_exp;
__u8 lcd_exp;
__u8 arm_exp;
__u8 dsp_exp;
__u8 tc_exp;
__u8 dspmmu_exp;
per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
if (dspmmu_exp < dsp_exp)
dspmmu_exp = dsp_exp;
if (dspmmu_exp > dsp_exp+1)
dspmmu_exp = dsp_exp+1;
if (tc_exp < arm_exp)
tc_exp = arm_exp;
if (tc_exp < dspmmu_exp)
tc_exp = dspmmu_exp;
if (tc_exp > lcd_exp)
lcd_exp = tc_exp;
if (tc_exp > per_exp)
per_exp = tc_exp;
newval &= 0xf000;
newval |= per_exp << CKCTL_PERDIV_OFFSET;
newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
newval |= tc_exp << CKCTL_TCDIV_OFFSET;
newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
return newval;
}
static int calc_dsor_exp(unsigned long rate, unsigned long realrate)
{
/* Note: If the target frequency is too low, this function will return 4,
* which is an invalid value. Caller must check for this value and act
* accordingly.
*
* Note: This function does not check for the following limitations set
* by the hardware (all conditions must be true):
* DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
* ARM_CK >= TC_CK
* DSP_CK >= TC_CK
* DSPMMU_CK >= TC_CK
*/
unsigned dsor_exp;
if (unlikely(realrate == 0))
return -EIO;
for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
if (realrate <= rate)
break;
realrate /= 2;
}
return dsor_exp;
}
unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
/* Calculate divisor encoded as 2-bit exponent */
int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
/* update locally maintained rate, required by arm_ck for omap1_show_rates() */
clk->rate = p_rate / dsor;
return clk->rate;
}
static int omap1_clk_is_enabled(struct clk_hw *hw)
{
struct omap1_clk *clk = to_omap1_clk(hw);
bool api_ck_was_enabled = true;
__u32 regval32;
int ret;
if (!clk->ops) /* no gate -- always enabled */
return 1;
if (clk->ops == &clkops_dspck) {
api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
if (!api_ck_was_enabled)
if (api_ck_p->ops->enable(api_ck_p) < 0)
return 0;
}
if (clk->flags & ENABLE_REG_32BIT)
regval32 = __raw_readl(clk->enable_reg);
else
regval32 = __raw_readw(clk->enable_reg);
ret = regval32 & (1 << clk->enable_bit);
if (!api_ck_was_enabled)
api_ck_p->ops->disable(api_ck_p);
return ret;
}
unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate)
{
bool api_ck_was_enabled;
int dsor;
/* Calculate divisor encoded as 2-bit exponent
*
* The clock control bits are in DSP domain,
* so api_ck is needed for access.
* Note that DSP_CKCTL virt addr = phys addr, so
* we must use __raw_readw() instead of omap_readw().
*/
api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
if (!api_ck_was_enabled)
api_ck_p->ops->enable(api_ck_p);
dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
if (!api_ck_was_enabled)
api_ck_p->ops->disable(api_ck_p);
return p_rate / dsor;
}
/* MPU virtual clock functions */
int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
/* Find the highest supported frequency <= rate and switch to it */
struct mpu_rate *ptr;
unsigned long ref_rate;
ref_rate = ck_ref_p->rate;
for (ptr = omap1_rate_table; ptr->rate; ptr++) {
if (!(ptr->flags & cpu_mask))
continue;
if (ptr->xtal != ref_rate)
continue;
/* Can check only after xtal frequency check */
if (ptr->rate <= rate)
break;
}
if (!ptr->rate)
return -EINVAL;
/*
* In most cases we should not need to reprogram DPLL.
* Reprogramming the DPLL is tricky; it must be done from SRAM.
*/
omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
ck_dpll1_p->rate = ptr->pll_rate;
return 0;
}
int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
int dsor_exp;
u16 regval;
dsor_exp = calc_dsor_exp(rate, p_rate);
if (dsor_exp > 3)
dsor_exp = -EINVAL;
if (dsor_exp < 0)
return dsor_exp;
regval = __raw_readw(DSP_CKCTL);
regval &= ~(3 << clk->rate_offset);
regval |= dsor_exp << clk->rate_offset;
__raw_writew(regval, DSP_CKCTL);
clk->rate = p_rate / (1 << dsor_exp);
return 0;
}
long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
unsigned long *p_rate)
{
int dsor_exp = calc_dsor_exp(rate, *p_rate);
if (dsor_exp < 0)
return dsor_exp;
if (dsor_exp > 3)
dsor_exp = 3;
return *p_rate / (1 << dsor_exp);
}
int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
unsigned long flags;
int dsor_exp;
u16 regval;
dsor_exp = calc_dsor_exp(rate, p_rate);
if (dsor_exp > 3)
dsor_exp = -EINVAL;
if (dsor_exp < 0)
return dsor_exp;
/* protect ARM_CKCTL register from concurrent access via clk_enable/disable() */
spin_lock_irqsave(&arm_ckctl_lock, flags);
regval = omap_readw(ARM_CKCTL);
regval &= ~(3 << clk->rate_offset);
regval |= dsor_exp << clk->rate_offset;
regval = verify_ckctl_value(regval);
omap_writew(regval, ARM_CKCTL);
clk->rate = p_rate / (1 << dsor_exp);
spin_unlock_irqrestore(&arm_ckctl_lock, flags);
return 0;
}
long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
/* Find the highest supported frequency <= rate */
struct mpu_rate *ptr;
long highest_rate;
unsigned long ref_rate;
ref_rate = ck_ref_p->rate;
highest_rate = -EINVAL;
for (ptr = omap1_rate_table; ptr->rate; ptr++) {
if (!(ptr->flags & cpu_mask))
continue;
if (ptr->xtal != ref_rate)
continue;
highest_rate = ptr->rate;
/* Can check only after xtal frequency check */
if (ptr->rate <= rate)
break;
}
return highest_rate;
}
static unsigned calc_ext_dsor(unsigned long rate)
{
unsigned dsor;
/* MCLK and BCLK divisor selection is not linear:
* freq = 96MHz / dsor
*
* RATIO_SEL <-> dsor mapping:
* RATIO_SEL 0..6: dsor = RATIO_SEL + 2 (i.e. RATIO_SEL = dsor - 2)
* RATIO_SEL 6..48: dsor = 8 + (RATIO_SEL - 6) * 2 (i.e. RATIO_SEL = (dsor - 8) / 2 + 6)
* Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
* cannot be used.
*/
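/*
 * Worked example (illustrative): a request of 11 MHz walks past
 * dsor = 8 (96 / 8 = 12 MHz, still too fast), skips the unusable
 * odd value 9, and settles on dsor = 10, i.e. 9.6 MHz.
 */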
for (dsor = 2; dsor < 96; ++dsor) {
if ((dsor & 1) && dsor > 8)
continue;
if (rate >= 96000000 / dsor)
break;
}
return dsor;
}
/* XXX Only needed on 1510 */
long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
return rate > 24000000 ? 48000000 : 12000000;
}
int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
unsigned long flags;
unsigned int val;
if (rate == 12000000)
val = 0;
else if (rate == 48000000)
val = 1 << clk->enable_bit;
else
return -EINVAL;
/* protect MOD_CONF_CTRL_0 register from concurrent access via clk_enable/disable() */
spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
val |= __raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit);
__raw_writel(val, clk->enable_reg);
spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
clk->rate = rate;
return 0;
}
/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
unsigned long flags;
unsigned dsor;
__u16 ratio_bits;
dsor = calc_ext_dsor(rate);
clk->rate = 96000000 / dsor;
if (dsor > 8)
ratio_bits = ((dsor - 8) / 2 + 6) << 2;
else
ratio_bits = (dsor - 2) << 2;
/* protect SWD_CLK_DIV_CTRL_SEL register from concurrent access via clk_enable/disable() */
spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
__raw_writew(ratio_bits, clk->enable_reg);
spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
return 0;
}
static int calc_div_sossi(unsigned long rate, unsigned long p_rate)
{
int div;
/* Round towards slower frequency */
div = (p_rate + rate - 1) / rate;
return --div;
}
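/*
 * Worked example (illustrative numbers): p_rate = 96 MHz and
 * rate = 20 MHz give div = ceil(96 / 20) - 1 = 4, so the caller
 * programs a divide-by-5 and ends up at 19.2 MHz, the closest
 * rate not above the request.
 */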
long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
int div;
div = calc_div_sossi(rate, *p_rate);
if (div < 0)
div = 0;
else if (div > 7)
div = 7;
return *p_rate / (div + 1);
}
int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
unsigned long flags;
u32 l;
int div;
div = calc_div_sossi(rate, p_rate);
if (div < 0 || div > 7)
return -EINVAL;
/* protect MOD_CONF_CTRL_1 register from concurrent access via clk_enable/disable() */
spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
l = omap_readl(MOD_CONF_CTRL_1);
l &= ~(7 << 17);
l |= div << 17;
omap_writel(l, MOD_CONF_CTRL_1);
clk->rate = p_rate / (div + 1);
spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
return 0;
}
long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
return 96000000 / calc_ext_dsor(rate);
}
int omap1_init_ext_clk(struct omap1_clk *clk)
{
unsigned dsor;
__u16 ratio_bits;
/* Determine current rate and ensure clock is based on 96MHz APLL */
ratio_bits = __raw_readw(clk->enable_reg) & ~1;
__raw_writew(ratio_bits, clk->enable_reg);
ratio_bits = (ratio_bits & 0xfc) >> 2;
if (ratio_bits > 6)
dsor = (ratio_bits - 6) * 2 + 8;
else
dsor = ratio_bits + 2;
clk->rate = 96000000 / dsor;
return 0;
}
static int omap1_clk_enable(struct clk_hw *hw)
{
struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
int ret = 0;
if (parent && clk->flags & CLOCK_NO_IDLE_PARENT)
omap1_clk_deny_idle(parent);
if (clk->ops && !(WARN_ON(!clk->ops->enable)))
ret = clk->ops->enable(clk);
return ret;
}
static void omap1_clk_disable(struct clk_hw *hw)
{
struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
if (clk->ops && !(WARN_ON(!clk->ops->disable)))
clk->ops->disable(clk);
if (likely(parent) && clk->flags & CLOCK_NO_IDLE_PARENT)
omap1_clk_allow_idle(parent);
}
static int omap1_clk_enable_generic(struct omap1_clk *clk)
{
unsigned long flags;
__u16 regval16;
__u32 regval32;
if (unlikely(clk->enable_reg == NULL)) {
printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
clk_hw_get_name(&clk->hw));
return -EINVAL;
}
/* protect clk->enable_reg from concurrent access via clk_set_rate() */
if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
spin_lock_irqsave(&arm_ckctl_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
spin_lock_irqsave(&arm_idlect2_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
if (clk->flags & ENABLE_REG_32BIT) {
regval32 = __raw_readl(clk->enable_reg);
regval32 |= (1 << clk->enable_bit);
__raw_writel(regval32, clk->enable_reg);
} else {
regval16 = __raw_readw(clk->enable_reg);
regval16 |= (1 << clk->enable_bit);
__raw_writew(regval16, clk->enable_reg);
}
if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
spin_unlock_irqrestore(&arm_ckctl_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
spin_unlock_irqrestore(&arm_idlect2_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
return 0;
}
static void omap1_clk_disable_generic(struct omap1_clk *clk)
{
unsigned long flags;
__u16 regval16;
__u32 regval32;
if (clk->enable_reg == NULL)
return;
/* protect clk->enable_reg from concurrent access via clk_set_rate() */
if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
spin_lock_irqsave(&arm_ckctl_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
spin_lock_irqsave(&arm_idlect2_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);
if (clk->flags & ENABLE_REG_32BIT) {
regval32 = __raw_readl(clk->enable_reg);
regval32 &= ~(1 << clk->enable_bit);
__raw_writel(regval32, clk->enable_reg);
} else {
regval16 = __raw_readw(clk->enable_reg);
regval16 &= ~(1 << clk->enable_bit);
__raw_writew(regval16, clk->enable_reg);
}
if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
spin_unlock_irqrestore(&arm_ckctl_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
spin_unlock_irqrestore(&arm_idlect2_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
}
const struct clkops clkops_generic = {
.enable = omap1_clk_enable_generic,
.disable = omap1_clk_disable_generic,
};
static int omap1_clk_enable_dsp_domain(struct omap1_clk *clk)
{
bool api_ck_was_enabled;
int retval = 0;
api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
if (!api_ck_was_enabled)
retval = api_ck_p->ops->enable(api_ck_p);
if (!retval) {
retval = omap1_clk_enable_generic(clk);
if (!api_ck_was_enabled)
api_ck_p->ops->disable(api_ck_p);
}
return retval;
}
static void omap1_clk_disable_dsp_domain(struct omap1_clk *clk)
{
bool api_ck_was_enabled;
api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
if (!api_ck_was_enabled)
if (api_ck_p->ops->enable(api_ck_p) < 0)
return;
omap1_clk_disable_generic(clk);
if (!api_ck_was_enabled)
api_ck_p->ops->disable(api_ck_p);
}
const struct clkops clkops_dspck = {
.enable = omap1_clk_enable_dsp_domain,
.disable = omap1_clk_disable_dsp_domain,
};
/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct omap1_clk *clk)
{
int ret;
struct uart_clk *uclk;
ret = omap1_clk_enable_generic(clk);
if (ret == 0) {
/* Set smart idle acknowledgement mode */
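		/*
		 * The read-modify-write below clears bit 4 and sets bit 3 of
		 * the UART SYSC register; judging from this function and the
		 * disable path below (which clears both bits for force idle),
		 * the bit pair [4:3] appears to select the idle
		 * acknowledgement mode - exact encoding per the TRM.
		 */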
uclk = (struct uart_clk *)clk;
omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
uclk->sysc_addr);
}
return ret;
}
/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct omap1_clk *clk)
{
struct uart_clk *uclk;
/* Set force idle acknowledgement mode */
uclk = (struct uart_clk *)clk;
omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
omap1_clk_disable_generic(clk);
}
/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
.enable = omap1_clk_enable_uart_functional_16xx,
.disable = omap1_clk_disable_uart_functional_16xx,
};
static unsigned long omap1_clk_recalc_rate(struct clk_hw *hw, unsigned long p_rate)
{
struct omap1_clk *clk = to_omap1_clk(hw);
if (clk->recalc)
return clk->recalc(clk, p_rate);
return clk->rate;
}
static long omap1_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *p_rate)
{
struct omap1_clk *clk = to_omap1_clk(hw);
if (clk->round_rate != NULL)
return clk->round_rate(clk, rate, p_rate);
return omap1_clk_recalc_rate(hw, *p_rate);
}
static int omap1_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate)
{
struct omap1_clk *clk = to_omap1_clk(hw);
int ret = -EINVAL;
if (clk->set_rate)
ret = clk->set_rate(clk, rate, p_rate);
return ret;
}
/*
* Omap1 clock reset and init functions
*/
static int omap1_clk_init_op(struct clk_hw *hw)
{
struct omap1_clk *clk = to_omap1_clk(hw);
if (clk->init)
return clk->init(clk);
return 0;
}
#ifdef CONFIG_OMAP_RESET_CLOCKS
static void omap1_clk_disable_unused(struct clk_hw *hw)
{
struct omap1_clk *clk = to_omap1_clk(hw);
const char *name = clk_hw_get_name(hw);
	/*
	 * Clocks in the DSP domain need api_ck.  Just assume the bootloader
	 * has not enabled any DSP clocks.
	 */

if (clk->enable_reg == DSP_IDLECT2) {
pr_info("Skipping reset check for DSP domain clock \"%s\"\n", name);
return;
}
pr_info("Disabling unused clock \"%s\"... ", name);
omap1_clk_disable(hw);
printk(" done\n");
}
#endif
const struct clk_ops omap1_clk_gate_ops = {
.enable = omap1_clk_enable,
.disable = omap1_clk_disable,
.is_enabled = omap1_clk_is_enabled,
#ifdef CONFIG_OMAP_RESET_CLOCKS
.disable_unused = omap1_clk_disable_unused,
#endif
};
const struct clk_ops omap1_clk_rate_ops = {
.recalc_rate = omap1_clk_recalc_rate,
.round_rate = omap1_clk_round_rate,
.set_rate = omap1_clk_set_rate,
.init = omap1_clk_init_op,
};
const struct clk_ops omap1_clk_full_ops = {
.enable = omap1_clk_enable,
.disable = omap1_clk_disable,
.is_enabled = omap1_clk_is_enabled,
#ifdef CONFIG_OMAP_RESET_CLOCKS
.disable_unused = omap1_clk_disable_unused,
#endif
.recalc_rate = omap1_clk_recalc_rate,
.round_rate = omap1_clk_round_rate,
.set_rate = omap1_clk_set_rate,
.init = omap1_clk_init_op,
};
/*
* OMAP specific clock functions shared between omap1 and omap2
*/
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
return p_rate;
}
/*
* Used for clocks that have the same value as the parent clock,
* divided by some factor
*/
unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
WARN_ON(!clk->fixed_div);
return p_rate / clk->fixed_div;
}
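/*
 * Worked example: armwdt_ck in clock_data.c uses .fixed_div = 14 with a
 * 12 MHz ck_ref parent, so this returns 12000000 / 14 = 857142 Hz.
 */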
/* Propagate rate to children */
void propagate_rate(struct omap1_clk *tclk)
{
struct clk *clkp;
/* depend on CCF ability to recalculate new rates across whole clock subtree */
if (WARN_ON(!(clk_hw_get_flags(&tclk->hw) & CLK_GET_RATE_NOCACHE)))
return;
clkp = clk_get_sys(NULL, clk_hw_get_name(&tclk->hw));
if (WARN_ON(!clkp))
return;
clk_get_rate(clkp);
clk_put(clkp);
}
const struct clk_ops omap1_clk_null_ops = {
};
/*
* Dummy clock
*
* Used for clock aliases that are needed on some OMAPs, but not others
*/
struct omap1_clk dummy_ck __refdata = {
.hw.init = CLK_HW_INIT_NO_PARENT("dummy", &omap1_clk_null_ops, 0),
};
| linux-master | arch/arm/mach-omap1/clock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/mcbsp.c
*
* Copyright (C) 2008 Instituto Nokia de Tecnologia
* Contact: Eduardo Valentin <[email protected]>
*
* Multichannel mode not supported.
*/
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/omap-dma.h>
#include <linux/soc/ti/omap1-io.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include "mux.h"
#include "soc.h"
#include "irqs.h"
#include "iomap.h"
#define DSP_RSTCT2_PER_EN	(1 << 0)
#define DSP_RSTCT2_WD_PER_EN	(1 << 1)
static int dsp_use;
static struct clk *api_clk;
static struct clk *dsp_clk;
static struct platform_device **omap_mcbsp_devices;
static void omap1_mcbsp_request(unsigned int id)
{
/*
* On 1510, 1610 and 1710, McBSP1 and McBSP3
* are DSP public peripherals.
*/
if (id == 0 || id == 2) {
if (dsp_use++ == 0) {
api_clk = clk_get(NULL, "api_ck");
dsp_clk = clk_get(NULL, "dsp_ck");
if (!IS_ERR(api_clk) && !IS_ERR(dsp_clk)) {
clk_prepare_enable(api_clk);
clk_prepare_enable(dsp_clk);
/*
* DSP external peripheral reset
* FIXME: This should be moved to dsp code
*/
				__raw_writew(__raw_readw(DSP_RSTCT2) | DSP_RSTCT2_PER_EN |
DSP_RSTCT2_WD_PER_EN, DSP_RSTCT2);
}
}
}
}
static void omap1_mcbsp_free(unsigned int id)
{
if (id == 0 || id == 2) {
if (--dsp_use == 0) {
if (!IS_ERR(api_clk)) {
clk_disable_unprepare(api_clk);
clk_put(api_clk);
}
if (!IS_ERR(dsp_clk)) {
clk_disable_unprepare(dsp_clk);
clk_put(dsp_clk);
}
}
}
}
static struct omap_mcbsp_ops omap1_mcbsp_ops = {
.request = omap1_mcbsp_request,
.free = omap1_mcbsp_free,
};
#define OMAP7XX_MCBSP1_BASE 0xfffb1000
#define OMAP7XX_MCBSP2_BASE 0xfffb1800
#define OMAP1510_MCBSP1_BASE 0xe1011800
#define OMAP1510_MCBSP2_BASE 0xfffb1000
#define OMAP1510_MCBSP3_BASE 0xe1017000
#define OMAP1610_MCBSP1_BASE 0xe1011800
#define OMAP1610_MCBSP2_BASE 0xfffb1000
#define OMAP1610_MCBSP3_BASE 0xe1017000
struct resource omap15xx_mcbsp_res[][6] = {
{
{
.start = OMAP1510_MCBSP1_BASE,
			.end = OMAP1510_MCBSP1_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "rx",
.start = INT_McBSP1RX,
.flags = IORESOURCE_IRQ,
},
{
.name = "tx",
.start = INT_McBSP1TX,
.flags = IORESOURCE_IRQ,
},
{
.name = "rx",
.start = 9,
.flags = IORESOURCE_DMA,
},
{
.name = "tx",
.start = 8,
.flags = IORESOURCE_DMA,
},
},
{
{
.start = OMAP1510_MCBSP2_BASE,
			.end = OMAP1510_MCBSP2_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "rx",
.start = INT_1510_SPI_RX,
.flags = IORESOURCE_IRQ,
},
{
.name = "tx",
.start = INT_1510_SPI_TX,
.flags = IORESOURCE_IRQ,
},
{
.name = "rx",
.start = 17,
.flags = IORESOURCE_DMA,
},
{
.name = "tx",
.start = 16,
.flags = IORESOURCE_DMA,
},
},
{
{
.start = OMAP1510_MCBSP3_BASE,
			.end = OMAP1510_MCBSP3_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "rx",
.start = INT_McBSP3RX,
.flags = IORESOURCE_IRQ,
},
{
.name = "tx",
.start = INT_McBSP3TX,
.flags = IORESOURCE_IRQ,
},
{
.name = "rx",
.start = 11,
.flags = IORESOURCE_DMA,
},
{
.name = "tx",
.start = 10,
.flags = IORESOURCE_DMA,
},
},
};
#define omap15xx_mcbsp_res_0 omap15xx_mcbsp_res[0]
static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
{
.ops = &omap1_mcbsp_ops,
},
{
.ops = &omap1_mcbsp_ops,
},
{
.ops = &omap1_mcbsp_ops,
},
};
#define OMAP15XX_MCBSP_RES_SZ ARRAY_SIZE(omap15xx_mcbsp_res[1])
#define OMAP15XX_MCBSP_COUNT ARRAY_SIZE(omap15xx_mcbsp_res)
struct resource omap16xx_mcbsp_res[][6] = {
{
{
.start = OMAP1610_MCBSP1_BASE,
			.end = OMAP1610_MCBSP1_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "rx",
.start = INT_McBSP1RX,
.flags = IORESOURCE_IRQ,
},
{
.name = "tx",
.start = INT_McBSP1TX,
.flags = IORESOURCE_IRQ,
},
{
.name = "rx",
.start = 9,
.flags = IORESOURCE_DMA,
},
{
.name = "tx",
.start = 8,
.flags = IORESOURCE_DMA,
},
},
{
{
.start = OMAP1610_MCBSP2_BASE,
			.end = OMAP1610_MCBSP2_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "rx",
.start = INT_1610_McBSP2_RX,
.flags = IORESOURCE_IRQ,
},
{
.name = "tx",
.start = INT_1610_McBSP2_TX,
.flags = IORESOURCE_IRQ,
},
{
.name = "rx",
.start = 17,
.flags = IORESOURCE_DMA,
},
{
.name = "tx",
.start = 16,
.flags = IORESOURCE_DMA,
},
},
{
{
.start = OMAP1610_MCBSP3_BASE,
			.end = OMAP1610_MCBSP3_BASE + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "rx",
.start = INT_McBSP3RX,
.flags = IORESOURCE_IRQ,
},
{
.name = "tx",
.start = INT_McBSP3TX,
.flags = IORESOURCE_IRQ,
},
{
.name = "rx",
.start = 11,
.flags = IORESOURCE_DMA,
},
{
.name = "tx",
.start = 10,
.flags = IORESOURCE_DMA,
},
},
};
#define omap16xx_mcbsp_res_0 omap16xx_mcbsp_res[0]
static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
{
.ops = &omap1_mcbsp_ops,
},
{
.ops = &omap1_mcbsp_ops,
},
{
.ops = &omap1_mcbsp_ops,
},
};
#define OMAP16XX_MCBSP_RES_SZ ARRAY_SIZE(omap16xx_mcbsp_res[1])
#define OMAP16XX_MCBSP_COUNT ARRAY_SIZE(omap16xx_mcbsp_res)
static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
struct omap_mcbsp_platform_data *config, int size)
{
int i;
omap_mcbsp_devices = kcalloc(size, sizeof(struct platform_device *),
GFP_KERNEL);
if (!omap_mcbsp_devices) {
printk(KERN_ERR "Could not register McBSP devices\n");
return;
}
for (i = 0; i < size; i++) {
struct platform_device *new_mcbsp;
int ret;
new_mcbsp = platform_device_alloc("omap-mcbsp", i + 1);
if (!new_mcbsp)
continue;
platform_device_add_resources(new_mcbsp, &res[i * res_count],
res_count);
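		/* OMAP1 McBSP registers are 16 bits wide, packed at 2-byte strides */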
config[i].reg_size = 2;
config[i].reg_step = 2;
new_mcbsp->dev.platform_data = &config[i];
ret = platform_device_add(new_mcbsp);
if (ret) {
platform_device_put(new_mcbsp);
continue;
}
omap_mcbsp_devices[i] = new_mcbsp;
}
}
static int __init omap1_mcbsp_init(void)
{
if (!cpu_class_is_omap1())
return -ENODEV;
if (cpu_is_omap15xx())
omap_mcbsp_register_board_cfg(omap15xx_mcbsp_res_0,
OMAP15XX_MCBSP_RES_SZ,
omap15xx_mcbsp_pdata,
OMAP15XX_MCBSP_COUNT);
if (cpu_is_omap16xx())
omap_mcbsp_register_board_cfg(omap16xx_mcbsp_res_0,
OMAP16XX_MCBSP_RES_SZ,
omap16xx_mcbsp_pdata,
OMAP16XX_MCBSP_COUNT);
return 0;
}
arch_initcall(omap1_mcbsp_init);
| linux-master | arch/arm/mach-omap1/mcbsp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OMAP1 reset support
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include "hardware.h"
#include "iomap.h"
#include "common.h"
/* ARM_SYSST bit shifts related to SoC reset sources */
#define ARM_SYSST_POR_SHIFT 5
#define ARM_SYSST_EXT_RST_SHIFT 4
#define ARM_SYSST_ARM_WDRST_SHIFT 2
#define ARM_SYSST_GLOB_SWRST_SHIFT 1
/* Standardized reset source bits (across all OMAP SoCs) */
#define OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT 0
#define OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT 1
#define OMAP_MPU_WD_RST_SRC_ID_SHIFT 3
#define OMAP_EXTWARM_RST_SRC_ID_SHIFT 5
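/*
 * Example mapping done by omap1_get_reset_sources() below: ARM_SYSST =
 * 0x0020 (power-on reset, bit 5) is translated to 0x0001 (global cold
 * reset, bit 0).
 */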
void omap1_restart(enum reboot_mode mode, const char *cmd)
{
/*
* Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
* "Global Software Reset Affects Traffic Controller Frequency".
*/
if (cpu_is_omap5912()) {
omap_writew(omap_readw(DPLL_CTL) & ~(1 << 4), DPLL_CTL);
omap_writew(0x8, ARM_RSTCT1);
}
omap_writew(1, ARM_RSTCT1);
}
/**
* omap1_get_reset_sources - return the source of the SoC's last reset
*
* Returns bits that represent the last reset source for the SoC. The
* format is standardized across OMAPs for use by the OMAP watchdog.
*/
u32 omap1_get_reset_sources(void)
{
u32 ret = 0;
u16 rs;
rs = __raw_readw(OMAP1_IO_ADDRESS(ARM_SYSST));
if (rs & (1 << ARM_SYSST_POR_SHIFT))
ret |= 1 << OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT;
if (rs & (1 << ARM_SYSST_EXT_RST_SHIFT))
ret |= 1 << OMAP_EXTWARM_RST_SRC_ID_SHIFT;
if (rs & (1 << ARM_SYSST_ARM_WDRST_SHIFT))
ret |= 1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT;
if (rs & (1 << ARM_SYSST_GLOB_SWRST_SHIFT))
ret |= 1 << OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT;
return ret;
}
| linux-master | arch/arm/mach-omap1/reset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/board-sx1.c
*
* Modified from board-generic.c
*
* Support for the Siemens SX1 mobile phone.
*
* Original version : Vladimir Ananiev (Vovan888-at-gmail com)
*
 * Maintainers : Vladimir Ananiev (aka Vovan888), Sergge
* oslik.ru
*/
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/omapfb.h>
#include <linux/platform_data/keypad-omap.h>
#include <linux/omap-dma.h>
#include "tc.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "flash.h"
#include "mux.h"
#include "board-sx1.h"
#include "hardware.h"
#include "usb.h"
#include "common.h"
/* Write to I2C device */
int sx1_i2c_write_byte(u8 devaddr, u8 regoffset, u8 value)
{
struct i2c_adapter *adap;
int err;
struct i2c_msg msg[1];
unsigned char data[2];
adap = i2c_get_adapter(0);
if (!adap)
return -ENODEV;
msg->addr = devaddr; /* I2C address of chip */
msg->flags = 0;
msg->len = 2;
msg->buf = data;
data[0] = regoffset; /* register num */
data[1] = value; /* register data */
err = i2c_transfer(adap, msg, 1);
i2c_put_adapter(adap);
if (err >= 0)
return 0;
return err;
}
/* Read from I2C device */
int sx1_i2c_read_byte(u8 devaddr, u8 regoffset, u8 *value)
{
struct i2c_adapter *adap;
int err;
struct i2c_msg msg[1];
unsigned char data[2];
adap = i2c_get_adapter(0);
if (!adap)
return -ENODEV;
msg->addr = devaddr; /* I2C address of chip */
msg->flags = 0;
msg->len = 1;
msg->buf = data;
data[0] = regoffset; /* register num */
	err = i2c_transfer(adap, msg, 1);
	if (err < 0) {
		/* register address write failed - don't attempt the read */
		i2c_put_adapter(adap);
		return err;
	}
	msg->addr = devaddr;	/* I2C address */
	msg->flags = I2C_M_RD;
	msg->len = 1;
	msg->buf = data;
	err = i2c_transfer(adap, msg, 1);
*value = data[0];
i2c_put_adapter(adap);
if (err >= 0)
return 0;
return err;
}
/* set keyboard backlight intensity */
int sx1_setkeylight(u8 keylight)
{
if (keylight > SOFIA_MAX_LIGHT_VAL)
keylight = SOFIA_MAX_LIGHT_VAL;
return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_KEYLIGHT_REG, keylight);
}
/* get current keylight intensity */
int sx1_getkeylight(u8 *keylight)
{
return sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_KEYLIGHT_REG, keylight);
}
/* set LCD backlight intensity */
int sx1_setbacklight(u8 backlight)
{
if (backlight > SOFIA_MAX_LIGHT_VAL)
backlight = SOFIA_MAX_LIGHT_VAL;
return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_BACKLIGHT_REG,
backlight);
}
/* get current LCD backlight intensity */
int sx1_getbacklight(u8 *backlight)
{
return sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_BACKLIGHT_REG,
backlight);
}
/* set LCD backlight power on/off */
int sx1_setmmipower(u8 onoff)
{
int err;
u8 dat = 0;
err = sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, &dat);
if (err < 0)
return err;
if (onoff)
dat |= SOFIA_MMILIGHT_POWER;
else
dat &= ~SOFIA_MMILIGHT_POWER;
return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, dat);
}
/* set USB power on/off */
int sx1_setusbpower(u8 onoff)
{
int err;
u8 dat = 0;
err = sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, &dat);
if (err < 0)
return err;
if (onoff)
dat |= SOFIA_USB_POWER;
else
dat &= ~SOFIA_USB_POWER;
return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, dat);
}
EXPORT_SYMBOL(sx1_setkeylight);
EXPORT_SYMBOL(sx1_getkeylight);
EXPORT_SYMBOL(sx1_setbacklight);
EXPORT_SYMBOL(sx1_getbacklight);
EXPORT_SYMBOL(sx1_setmmipower);
EXPORT_SYMBOL(sx1_setusbpower);
/*----------- Keypad -------------------------*/
static const unsigned int sx1_keymap[] = {
KEY(3, 5, GROUP_0 | 117), /* camera Qt::Key_F17 */
KEY(4, 0, GROUP_0 | 114), /* voice memo Qt::Key_F14 */
KEY(4, 1, GROUP_2 | 114), /* voice memo */
KEY(4, 2, GROUP_3 | 114), /* voice memo */
KEY(0, 0, GROUP_1 | KEY_F12), /* red button Qt::Key_Hangup */
KEY(3, 4, GROUP_1 | KEY_LEFT),
KEY(3, 2, GROUP_1 | KEY_DOWN),
KEY(3, 1, GROUP_1 | KEY_RIGHT),
KEY(3, 0, GROUP_1 | KEY_UP),
KEY(3, 3, GROUP_1 | KEY_POWER), /* joystick press or Qt::Key_Select */
KEY(0, 5, GROUP_1 | KEY_1),
KEY(0, 4, GROUP_1 | KEY_2),
KEY(0, 3, GROUP_1 | KEY_3),
KEY(4, 3, GROUP_1 | KEY_4),
KEY(4, 4, GROUP_1 | KEY_5),
KEY(4, 5, GROUP_1 | KEY_KPASTERISK),/* "*" */
KEY(1, 4, GROUP_1 | KEY_6),
KEY(1, 5, GROUP_1 | KEY_7),
KEY(1, 3, GROUP_1 | KEY_8),
KEY(2, 3, GROUP_1 | KEY_9),
KEY(2, 5, GROUP_1 | KEY_0),
KEY(2, 4, GROUP_1 | 113), /* # F13 Toggle input method Qt::Key_F13 */
KEY(1, 0, GROUP_1 | KEY_F11), /* green button Qt::Key_Call */
KEY(2, 1, GROUP_1 | KEY_YEN), /* left soft Qt::Key_Context1 */
KEY(2, 2, GROUP_1 | KEY_F8), /* right soft Qt::Key_Back */
KEY(1, 2, GROUP_1 | KEY_LEFTSHIFT), /* shift */
KEY(1, 1, GROUP_1 | KEY_BACKSPACE), /* C (clear) */
KEY(2, 0, GROUP_1 | KEY_F7), /* menu Qt::Key_Menu */
};
static struct resource sx1_kp_resources[] = {
[0] = {
.start = INT_KEYBOARD,
.end = INT_KEYBOARD,
.flags = IORESOURCE_IRQ,
},
};
static const struct matrix_keymap_data sx1_keymap_data = {
.keymap = sx1_keymap,
.keymap_size = ARRAY_SIZE(sx1_keymap),
};
static struct omap_kp_platform_data sx1_kp_data = {
.rows = 6,
.cols = 6,
.keymap_data = &sx1_keymap_data,
.delay = 80,
};
static struct platform_device sx1_kp_device = {
.name = "omap-keypad",
.id = -1,
.dev = {
.platform_data = &sx1_kp_data,
},
.num_resources = ARRAY_SIZE(sx1_kp_resources),
.resource = sx1_kp_resources,
};
/*----------- MTD -------------------------*/
static struct mtd_partition sx1_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
{
.name = "bootloader",
.offset = 0x01800000,
.size = SZ_128K,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
/* bootloader params in the next sector */
{
.name = "params",
.offset = MTDPART_OFS_APPEND,
.size = SZ_128K,
.mask_flags = 0,
},
/* kernel */
{
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = SZ_2M - 2 * SZ_128K,
.mask_flags = 0
},
/* file system */
{
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0
}
};
static struct physmap_flash_data sx1_flash_data = {
.width = 2,
.set_vpp = omap1_set_vpp,
.parts = sx1_partitions,
.nr_parts = ARRAY_SIZE(sx1_partitions),
};
/* MTD Intel 4000 flash - new flashes */
static struct resource sx1_new_flash_resource = {
.start = OMAP_CS0_PHYS,
.end = OMAP_CS0_PHYS + SZ_32M - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device sx1_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &sx1_flash_data,
},
.num_resources = 1,
.resource = &sx1_new_flash_resource,
};
/*----------- USB -------------------------*/
static struct omap_usb_config sx1_usb_config __initdata = {
.otg = 0,
.register_dev = 1,
.register_host = 0,
.hmc_mode = 0,
.pins[0] = 2,
.pins[1] = 0,
.pins[2] = 0,
};
/*----------- LCD -------------------------*/
static const struct omap_lcd_config sx1_lcd_config __initconst = {
.ctrl_name = "internal",
};
/*-----------------------------------------*/
static struct platform_device *sx1_devices[] __initdata = {
&sx1_flash_device,
&sx1_kp_device,
};
/*-----------------------------------------*/
static struct gpiod_lookup_table sx1_gpio_table = {
.dev_id = NULL,
.table = {
GPIO_LOOKUP("gpio-0-15", 1, "irda_off",
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-0-15", 11, "switch",
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-0-15", 15, "usb_on",
GPIO_ACTIVE_HIGH),
{ }
},
};
static void __init omap_sx1_init(void)
{
struct gpio_desc *d;
/* mux pins for uarts */
omap_cfg_reg(UART1_TX);
omap_cfg_reg(UART1_RTS);
omap_cfg_reg(UART2_TX);
omap_cfg_reg(UART2_RTS);
omap_cfg_reg(UART3_TX);
omap_cfg_reg(UART3_RX);
platform_add_devices(sx1_devices, ARRAY_SIZE(sx1_devices));
omap_serial_init();
omap_register_i2c_bus(1, 100, NULL, 0);
omap1_usb_init(&sx1_usb_config);
sx1_mmc_init();
gpiod_add_lookup_table(&sx1_gpio_table);
/* turn on USB power */
/* sx1_setusbpower(1); can't do it here because i2c is not ready */
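	/*
	 * Claim each line just long enough to drive its initial level; the
	 * descriptors are released immediately since nothing here toggles
	 * these GPIOs at runtime.
	 */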
d = gpiod_get(NULL, "irda_off", GPIOD_OUT_HIGH);
if (IS_ERR(d))
pr_err("Unable to get IRDA OFF GPIO descriptor\n");
else
gpiod_put(d);
d = gpiod_get(NULL, "switch", GPIOD_OUT_LOW);
if (IS_ERR(d))
pr_err("Unable to get SWITCH GPIO descriptor\n");
else
gpiod_put(d);
d = gpiod_get(NULL, "usb_on", GPIOD_OUT_LOW);
if (IS_ERR(d))
pr_err("Unable to get USB ON GPIO descriptor\n");
else
gpiod_put(d);
omapfb_set_lcd_config(&sx1_lcd_config);
}
MACHINE_START(SX1, "OMAP310 based Siemens SX1")
.atag_offset = 0x100,
.map_io = omap1_map_io,
.init_early = omap1_init_early,
.init_irq = omap1_init_irq,
.init_machine = omap_sx1_init,
.init_late = omap1_init_late,
.init_time = omap1_timer_init,
.restart = omap1_restart,
MACHINE_END
| linux-master | arch/arm/mach-omap1/board-sx1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/clock_data.c
*
* Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
* Written by Tuukka Tikkanen <[email protected]>
* Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
*
* To do:
* - Clocks that are only available on some chips should be marked with the
* chips that they are present on.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/soc/ti/omap1-io.h>
#include <asm/mach-types.h> /* for machine_is_* */
#include "soc.h"
#include "hardware.h"
#include "usb.h" /* for OTG_BASE */
#include "iomap.h"
#include "clock.h"
#include "sram.h"
/* Some ARM_IDLECT1 bit shifts - used in struct arm_idlect1_clk */
#define IDL_CLKOUT_ARM_SHIFT 12
#define IDLTIM_ARM_SHIFT 9
#define IDLAPI_ARM_SHIFT 8
#define IDLIF_ARM_SHIFT 6
#define IDLLB_ARM_SHIFT 4 /* undocumented? */
#define OMAP1510_IDLLCD_ARM_SHIFT 3 /* undocumented? */
#define IDLPER_ARM_SHIFT 2
#define IDLXORP_ARM_SHIFT 1
#define IDLWDT_ARM_SHIFT 0
/* Some MOD_CONF_CTRL_0 bit shifts - used in struct clk.enable_bit */
#define CONF_MOD_UART3_CLK_MODE_R 31
#define CONF_MOD_UART2_CLK_MODE_R 30
#define CONF_MOD_UART1_CLK_MODE_R 29
#define CONF_MOD_MMC_SD_CLK_REQ_R 23
#define CONF_MOD_MCBSP3_AUXON 20
/* Some MOD_CONF_CTRL_1 bit shifts - used in struct clk.enable_bit */
#define CONF_MOD_SOSSI_CLK_EN_R 16
/* Some OTG_SYSCON_2-specific bit fields */
#define OTG_SYSCON_2_UHOST_EN_SHIFT 8
/* Some SOFT_REQ_REG bit fields - used in struct clk.enable_bit */
#define SOFT_MMC2_DPLL_REQ_SHIFT 13
#define SOFT_MMC_DPLL_REQ_SHIFT 12
#define SOFT_UART3_DPLL_REQ_SHIFT 11
#define SOFT_UART2_DPLL_REQ_SHIFT 10
#define SOFT_UART1_DPLL_REQ_SHIFT 9
#define SOFT_USB_OTG_DPLL_REQ_SHIFT 8
#define SOFT_CAM_DPLL_REQ_SHIFT 7
#define SOFT_COM_MCKO_REQ_SHIFT 6
#define SOFT_PERIPH_REQ_SHIFT 5 /* sys_ck gate for UART2 ? */
#define USB_REQ_EN_SHIFT 4
#define SOFT_USB_REQ_SHIFT 3 /* sys_ck gate for USB host? */
#define SOFT_SDW_REQ_SHIFT 2 /* sys_ck gate for Bluetooth? */
#define SOFT_COM_REQ_SHIFT 1 /* sys_ck gate for com proc? */
#define SOFT_DPLL_REQ_SHIFT 0
/*
* Omap1 clocks
*/
static struct omap1_clk ck_ref = {
.hw.init = CLK_HW_INIT_NO_PARENT("ck_ref", &omap1_clk_rate_ops, 0),
.rate = 12000000,
};
static struct omap1_clk ck_dpll1 = {
.hw.init = CLK_HW_INIT("ck_dpll1", "ck_ref", &omap1_clk_rate_ops,
/*
* force recursive refresh of rates of the clock
* and its children when clk_get_rate() is called
*/
CLK_GET_RATE_NOCACHE),
};
/*
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ FIX: SoSSI, SSR ]
*/
static struct arm_idlect1_clk ck_dpll1out = {
.clk = {
.hw.init = CLK_HW_INIT("ck_dpll1out", "ck_dpll1", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL | ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_CKOUT_ARM,
},
.idlect_shift = IDL_CLKOUT_ARM_SHIFT,
};
static struct omap1_clk sossi_ck = {
.hw.init = CLK_HW_INIT("ck_sossi", "ck_dpll1out", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
.flags = CLOCK_NO_IDLE_PARENT | ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1),
.enable_bit = CONF_MOD_SOSSI_CLK_EN_R,
.recalc = &omap1_sossi_recalc,
.round_rate = &omap1_round_sossi_rate,
.set_rate = &omap1_set_sossi_rate,
};
static struct omap1_clk arm_ck = {
.hw.init = CLK_HW_INIT("arm_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
.rate_offset = CKCTL_ARMDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct arm_idlect1_clk armper_ck = {
.clk = {
.hw.init = CLK_HW_INIT("armper_ck", "ck_dpll1", &omap1_clk_full_ops,
CLK_IS_CRITICAL),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_PERCK,
.rate_offset = CKCTL_PERDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
},
.idlect_shift = IDLPER_ARM_SHIFT,
};
/*
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ GPIO code for 1510 ]
*/
static struct omap1_clk arm_gpio_ck = {
.hw.init = CLK_HW_INIT("ick", "ck_dpll1", &omap1_clk_gate_ops, CLK_IS_CRITICAL),
.ops = &clkops_generic,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_GPIOCK,
};
static struct arm_idlect1_clk armxor_ck = {
.clk = {
.hw.init = CLK_HW_INIT("armxor_ck", "ck_ref", &omap1_clk_gate_ops,
CLK_IS_CRITICAL),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_XORPCK,
},
.idlect_shift = IDLXORP_ARM_SHIFT,
};
static struct arm_idlect1_clk armtim_ck = {
.clk = {
.hw.init = CLK_HW_INIT("armtim_ck", "ck_ref", &omap1_clk_gate_ops,
CLK_IS_CRITICAL),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_TIMCK,
},
.idlect_shift = IDLTIM_ARM_SHIFT,
};
static struct arm_idlect1_clk armwdt_ck = {
.clk = {
.hw.init = CLK_HW_INIT("armwdt_ck", "ck_ref", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_WDTCK,
.fixed_div = 14,
.recalc = &omap_fixed_divisor_recalc,
},
.idlect_shift = IDLWDT_ARM_SHIFT,
};
static struct omap1_clk arminth_ck16xx = {
.hw.init = CLK_HW_INIT("arminth_ck", "arm_ck", &omap1_clk_null_ops, 0),
/* Note: On 16xx the frequency can be divided by 2 by programming
* ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
*
* 1510 version is in TC clocks.
*/
};
static struct omap1_clk dsp_ck = {
.hw.init = CLK_HW_INIT("dsp_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
.enable_reg = OMAP1_IO_ADDRESS(ARM_CKCTL),
.enable_bit = EN_DSPCK,
.rate_offset = CKCTL_DSPDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct omap1_clk dspmmu_ck = {
.hw.init = CLK_HW_INIT("dspmmu_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
.rate_offset = CKCTL_DSPMMUDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct omap1_clk dspper_ck = {
.hw.init = CLK_HW_INIT("dspper_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_dspck,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_PERCK,
.rate_offset = CKCTL_PERDIV_OFFSET,
.recalc = &omap1_ckctl_recalc_dsp_domain,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = &omap1_clk_set_rate_dsp_domain,
};
static struct omap1_clk dspxor_ck = {
.hw.init = CLK_HW_INIT("dspxor_ck", "ck_ref", &omap1_clk_gate_ops, 0),
.ops = &clkops_dspck,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_XORPCK,
};
static struct omap1_clk dsptim_ck = {
.hw.init = CLK_HW_INIT("dsptim_ck", "ck_ref", &omap1_clk_gate_ops, 0),
.ops = &clkops_dspck,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_DSPTIMCK,
};
static struct arm_idlect1_clk tc_ck = {
.clk = {
.hw.init = CLK_HW_INIT("tc_ck", "ck_dpll1", &omap1_clk_rate_ops, 0),
.flags = CLOCK_IDLE_CONTROL,
.rate_offset = CKCTL_TCDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
},
.idlect_shift = IDLIF_ARM_SHIFT,
};
static struct omap1_clk arminth_ck1510 = {
.hw.init = CLK_HW_INIT("arminth_ck", "tc_ck", &omap1_clk_null_ops, 0),
/* Note: On 1510 the frequency follows TC_CK
*
* 16xx version is in MPU clocks.
*/
};
static struct omap1_clk tipb_ck = {
/* No-idle controlled by "tc_ck" */
.hw.init = CLK_HW_INIT("tipb_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
static struct omap1_clk l3_ocpi_ck = {
/* No-idle controlled by "tc_ck" */
.hw.init = CLK_HW_INIT("l3_ocpi_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_OCPI_CK,
};
static struct omap1_clk tc1_ck = {
.hw.init = CLK_HW_INIT("tc1_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_TC1_CK,
};
/*
* FIXME: This clock seems to be necessary but no-one has asked for its
* activation. [ pm.c (SRAM), CCP, Camera ]
*/
static struct omap1_clk tc2_ck = {
.hw.init = CLK_HW_INIT("tc2_ck", "tc_ck", &omap1_clk_gate_ops, CLK_IS_CRITICAL),
.ops = &clkops_generic,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
.enable_bit = EN_TC2_CK,
};
static struct omap1_clk dma_ck = {
/* No-idle controlled by "tc_ck" */
.hw.init = CLK_HW_INIT("dma_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
static struct omap1_clk dma_lcdfree_ck = {
.hw.init = CLK_HW_INIT("dma_lcdfree_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
static struct arm_idlect1_clk api_ck = {
.clk = {
.hw.init = CLK_HW_INIT("api_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_APICK,
},
.idlect_shift = IDLAPI_ARM_SHIFT,
};
static struct arm_idlect1_clk lb_ck = {
.clk = {
.hw.init = CLK_HW_INIT("lb_ck", "tc_ck", &omap1_clk_gate_ops, 0),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LBCK,
},
.idlect_shift = IDLLB_ARM_SHIFT,
};
static struct omap1_clk rhea1_ck = {
.hw.init = CLK_HW_INIT("rhea1_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
static struct omap1_clk rhea2_ck = {
.hw.init = CLK_HW_INIT("rhea2_ck", "tc_ck", &omap1_clk_null_ops, 0),
};
static struct omap1_clk lcd_ck_16xx = {
.hw.init = CLK_HW_INIT("lcd_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LCDCK,
.rate_offset = CKCTL_LCDDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
};
static struct arm_idlect1_clk lcd_ck_1510 = {
.clk = {
.hw.init = CLK_HW_INIT("lcd_ck", "ck_dpll1", &omap1_clk_full_ops, 0),
.ops = &clkops_generic,
.flags = CLOCK_IDLE_CONTROL,
.enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
.enable_bit = EN_LCDCK,
.rate_offset = CKCTL_LCDDIV_OFFSET,
.recalc = &omap1_ckctl_recalc,
.round_rate = omap1_clk_round_rate_ckctl_arm,
.set_rate = omap1_clk_set_rate_ckctl_arm,
},
.idlect_shift = OMAP1510_IDLLCD_ARM_SHIFT,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clk_mux.
*
* XXX does this need SYSC register handling?
*/
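/*
 * A minimal sketch of the clk_mux replacement suggested above, assuming
 * hypothetical fixed-rate parents "uart_12m"/"uart_48m" registered
 * elsewhere:
 *
 *	static const char * const uart1_parents[] = { "uart_12m", "uart_48m" };
 *
 *	clk_hw_register_mux(NULL, "uart1_ck", uart1_parents,
 *			    ARRAY_SIZE(uart1_parents), 0,
 *			    OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
 *			    CONF_MOD_UART1_CLK_MODE_R, 1, 0, NULL);
 */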
static struct omap1_clk uart1_1510 = {
/* Direct from ULPD, no real parent */
.hw.init = CLK_HW_INIT("uart1_ck", "armper_ck", &omap1_clk_full_ops, 0),
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART1_CLK_MODE_R,
.round_rate = &omap1_round_uart_rate,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clk_mux.
*
* XXX SYSC register handling does not belong in the clock framework
*/
static struct uart_clk uart1_16xx = {
.clk = {
.ops = &clkops_uart_16xx,
/* Direct from ULPD, no real parent */
.hw.init = CLK_HW_INIT("uart1_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART1_CLK_MODE_R,
},
.sysc_addr = 0xfffb0054,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clk_mux.
*
* XXX does this need SYSC register handling?
*/
static struct omap1_clk uart2_ck = {
/* Direct from ULPD, no real parent */
.hw.init = CLK_HW_INIT("uart2_ck", "armper_ck", &omap1_clk_full_ops, 0),
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART2_CLK_MODE_R,
.round_rate = &omap1_round_uart_rate,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clk_mux.
*
* XXX does this need SYSC register handling?
*/
static struct omap1_clk uart3_1510 = {
/* Direct from ULPD, no real parent */
.hw.init = CLK_HW_INIT("uart3_ck", "armper_ck", &omap1_clk_full_ops, 0),
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART3_CLK_MODE_R,
.round_rate = &omap1_round_uart_rate,
.set_rate = &omap1_set_uart_rate,
.recalc = &omap1_uart_recalc,
};
/*
* XXX The enable_bit here is misused - it simply switches between 12MHz
* and 48MHz. Reimplement with clk_mux.
*
* XXX SYSC register handling does not belong in the clock framework
*/
static struct uart_clk uart3_16xx = {
.clk = {
.ops = &clkops_uart_16xx,
/* Direct from ULPD, no real parent */
.hw.init = CLK_HW_INIT("uart3_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_UART3_CLK_MODE_R,
},
.sysc_addr = 0xfffb9854,
};
static struct omap1_clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.hw.init = CLK_HW_INIT_NO_PARENT("usb_clko", &omap1_clk_full_ops, 0),
.rate = 6000000,
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(ULPD_CLOCK_CTRL),
.enable_bit = USB_MCLK_EN_BIT,
};
static struct omap1_clk usb_hhc_ck1510 = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.hw.init = CLK_HW_INIT_NO_PARENT("usb_hhc_ck", &omap1_clk_full_ops, 0),
.rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = USB_HOST_HHC_UHOST_EN,
};
static struct omap1_clk usb_hhc_ck16xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.hw.init = CLK_HW_INIT_NO_PARENT("usb_hhc_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
/* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
.flags = ENABLE_REG_32BIT,
.enable_reg = OMAP1_IO_ADDRESS(OTG_BASE + 0x08), /* OTG_SYSCON_2 */
.enable_bit = OTG_SYSCON_2_UHOST_EN_SHIFT
};
static struct omap1_clk usb_dc_ck = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.hw.init = CLK_HW_INIT_NO_PARENT("usb_dc_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT,
};
static struct omap1_clk uart1_7xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.hw.init = CLK_HW_INIT_NO_PARENT("uart1_ck", &omap1_clk_full_ops, 0),
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = 9,
};
static struct omap1_clk uart2_7xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
.hw.init = CLK_HW_INIT_NO_PARENT("uart2_ck", &omap1_clk_full_ops, 0),
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = 11,
};
static struct omap1_clk mclk_1510 = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.hw.init = CLK_HW_INIT_NO_PARENT("mclk", &omap1_clk_full_ops, 0),
.rate = 12000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_COM_MCKO_REQ_SHIFT,
};
static struct omap1_clk mclk_16xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.hw.init = CLK_HW_INIT_NO_PARENT("mclk", &omap1_clk_full_ops, 0),
.enable_reg = OMAP1_IO_ADDRESS(COM_CLK_DIV_CTRL_SEL),
.enable_bit = COM_ULPD_PLL_CLK_REQ,
.set_rate = &omap1_set_ext_clk_rate,
.round_rate = &omap1_round_ext_clk_rate,
.init = &omap1_init_ext_clk,
};
static struct omap1_clk bclk_1510 = {
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.hw.init = CLK_HW_INIT_NO_PARENT("bclk", &omap1_clk_rate_ops, 0),
.rate = 12000000,
};
static struct omap1_clk bclk_16xx = {
.ops = &clkops_generic,
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.hw.init = CLK_HW_INIT_NO_PARENT("bclk", &omap1_clk_full_ops, 0),
.enable_reg = OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL),
.enable_bit = SWD_ULPD_PLL_CLK_REQ,
.set_rate = &omap1_set_ext_clk_rate,
.round_rate = &omap1_round_ext_clk_rate,
.init = &omap1_init_ext_clk,
};
static struct omap1_clk mmc1_ck = {
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.hw.init = CLK_HW_INIT("mmc1_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = CONF_MOD_MMC_SD_CLK_REQ_R,
};
/*
* XXX MOD_CONF_CTRL_0 bit 20 is defined in the 1510 TRM as
* CONF_MOD_MCBSP3_AUXON ??
*/
static struct omap1_clk mmc2_ck = {
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.hw.init = CLK_HW_INIT("mmc2_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
.enable_bit = 20,
};
static struct omap1_clk mmc3_ck = {
.ops = &clkops_generic,
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.hw.init = CLK_HW_INIT("mmc3_ck", "armper_ck", &omap1_clk_full_ops, 0),
.rate = 48000000,
.flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_MMC_DPLL_REQ_SHIFT,
};
static struct omap1_clk virtual_ck_mpu = {
/* Is smarter alias for arm_ck */
.hw.init = CLK_HW_INIT("mpu", "arm_ck", &omap1_clk_rate_ops, 0),
.recalc = &followparent_recalc,
.set_rate = &omap1_select_table_rate,
.round_rate = &omap1_round_to_table_rate,
};
/*
 * Virtual functional clock domain for I2C.  Just for making sure that
 * ARMXOR_CK remains active during MPU idle whenever this is enabled.
 */
static struct omap1_clk i2c_fck = {
.hw.init = CLK_HW_INIT("i2c_fck", "armxor_ck", &omap1_clk_gate_ops, 0),
.flags = CLOCK_NO_IDLE_PARENT,
};
static struct omap1_clk i2c_ick = {
.hw.init = CLK_HW_INIT("i2c_ick", "armper_ck", &omap1_clk_gate_ops, 0),
.flags = CLOCK_NO_IDLE_PARENT,
};
/*
* clkdev integration
*/
static struct omap_clk omap_clks[] = {
/* non-ULPD clocks */
CLK(NULL, "ck_ref", &ck_ref.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "ck_dpll1", &ck_dpll1.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
/* CK_GEN1 clocks */
CLK(NULL, "ck_dpll1out", &ck_dpll1out.clk.hw, CK_16XX),
CLK(NULL, "ck_sossi", &sossi_ck.hw, CK_16XX),
CLK(NULL, "arm_ck", &arm_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "armper_ck", &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
CLK("omap_gpio.0", "ick", &arm_gpio_ck.hw, CK_1510 | CK_310),
CLK(NULL, "armxor_ck", &armxor_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "armtim_ck", &armtim_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
CLK("omap_wdt", "fck", &armwdt_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
CLK("omap_wdt", "ick", &armper_ck.clk.hw, CK_16XX),
CLK("omap_wdt", "ick", &dummy_ck.hw, CK_1510 | CK_310),
CLK(NULL, "arminth_ck", &arminth_ck1510.hw, CK_1510 | CK_310),
CLK(NULL, "arminth_ck", &arminth_ck16xx.hw, CK_16XX),
/* CK_GEN2 clocks */
CLK(NULL, "dsp_ck", &dsp_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dspmmu_ck", &dspmmu_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dspper_ck", &dspper_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dspxor_ck", &dspxor_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dsptim_ck", &dsptim_ck.hw, CK_16XX | CK_1510 | CK_310),
/* CK_GEN3 clocks */
CLK(NULL, "tc_ck", &tc_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "tipb_ck", &tipb_ck.hw, CK_1510 | CK_310),
CLK(NULL, "l3_ocpi_ck", &l3_ocpi_ck.hw, CK_16XX | CK_7XX),
CLK(NULL, "tc1_ck", &tc1_ck.hw, CK_16XX),
CLK(NULL, "tc2_ck", &tc2_ck.hw, CK_16XX),
CLK(NULL, "dma_ck", &dma_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dma_lcdfree_ck", &dma_lcdfree_ck.hw, CK_16XX),
CLK(NULL, "api_ck", &api_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "lb_ck", &lb_ck.clk.hw, CK_1510 | CK_310),
CLK(NULL, "rhea1_ck", &rhea1_ck.hw, CK_16XX),
CLK(NULL, "rhea2_ck", &rhea2_ck.hw, CK_16XX),
CLK(NULL, "lcd_ck", &lcd_ck_16xx.hw, CK_16XX | CK_7XX),
CLK(NULL, "lcd_ck", &lcd_ck_1510.clk.hw, CK_1510 | CK_310),
/* ULPD clocks */
CLK(NULL, "uart1_ck", &uart1_1510.hw, CK_1510 | CK_310),
CLK(NULL, "uart1_ck", &uart1_16xx.clk.hw, CK_16XX),
CLK(NULL, "uart1_ck", &uart1_7xx.hw, CK_7XX),
CLK(NULL, "uart2_ck", &uart2_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "uart2_ck", &uart2_7xx.hw, CK_7XX),
CLK(NULL, "uart3_ck", &uart3_1510.hw, CK_1510 | CK_310),
CLK(NULL, "uart3_ck", &uart3_16xx.clk.hw, CK_16XX),
CLK(NULL, "usb_clko", &usb_clko.hw, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510.hw, CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx.hw, CK_16XX),
CLK(NULL, "usb_dc_ck", &usb_dc_ck.hw, CK_16XX | CK_7XX),
CLK(NULL, "mclk", &mclk_1510.hw, CK_1510 | CK_310),
CLK(NULL, "mclk", &mclk_16xx.hw, CK_16XX),
CLK(NULL, "bclk", &bclk_1510.hw, CK_1510 | CK_310),
CLK(NULL, "bclk", &bclk_16xx.hw, CK_16XX),
CLK("mmci-omap.0", "fck", &mmc1_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK("mmci-omap.0", "fck", &mmc3_ck.hw, CK_7XX),
CLK("mmci-omap.0", "ick", &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK("mmci-omap.1", "fck", &mmc2_ck.hw, CK_16XX),
CLK("mmci-omap.1", "ick", &armper_ck.clk.hw, CK_16XX),
/* Virtual clocks */
CLK(NULL, "mpu", &virtual_ck_mpu.hw, CK_16XX | CK_1510 | CK_310),
CLK("omap_i2c.1", "fck", &i2c_fck.hw, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK("omap_i2c.1", "ick", &i2c_ick.hw, CK_16XX),
CLK("omap_i2c.1", "ick", &dummy_ck.hw, CK_1510 | CK_310 | CK_7XX),
CLK("omap1_spi100k.1", "fck", &dummy_ck.hw, CK_7XX),
CLK("omap1_spi100k.1", "ick", &dummy_ck.hw, CK_7XX),
CLK("omap1_spi100k.2", "fck", &dummy_ck.hw, CK_7XX),
CLK("omap1_spi100k.2", "ick", &dummy_ck.hw, CK_7XX),
CLK("omap_uwire", "fck", &armxor_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
CLK("omap-mcbsp.1", "ick", &dspper_ck.hw, CK_16XX),
CLK("omap-mcbsp.1", "ick", &dummy_ck.hw, CK_1510 | CK_310),
CLK("omap-mcbsp.2", "ick", &armper_ck.clk.hw, CK_16XX),
CLK("omap-mcbsp.2", "ick", &dummy_ck.hw, CK_1510 | CK_310),
CLK("omap-mcbsp.3", "ick", &dspper_ck.hw, CK_16XX),
CLK("omap-mcbsp.3", "ick", &dummy_ck.hw, CK_1510 | CK_310),
CLK("omap-mcbsp.1", "fck", &dspxor_ck.hw, CK_16XX | CK_1510 | CK_310),
CLK("omap-mcbsp.2", "fck", &armper_ck.clk.hw, CK_16XX | CK_1510 | CK_310),
CLK("omap-mcbsp.3", "fck", &dspxor_ck.hw, CK_16XX | CK_1510 | CK_310),
};
/*
* init
*/
static void __init omap1_show_rates(void)
{
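	/*
	 * Rates print with one decimal digit, e.g. 60000000 Hz shows as
	 * "60.0": 60000000 / 1000000 = 60, (60000000 / 100000) % 10 = 0.
	 */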
pr_notice("Clocking rate (xtal/DPLL1/MPU): %ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
}
u32 cpu_mask;
int __init omap1_clk_init(void)
{
struct omap_clk *c;
u32 reg;
#ifdef CONFIG_DEBUG_LL
/* Make sure UART clocks are enabled early */
if (cpu_is_omap16xx())
omap_writel(omap_readl(MOD_CONF_CTRL_0) |
CONF_MOD_UART1_CLK_MODE_R |
CONF_MOD_UART3_CLK_MODE_R, MOD_CONF_CTRL_0);
#endif
/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
	reg = omap_readw(SOFT_REQ_REG) & (1 << USB_REQ_EN_SHIFT);
omap_writew(reg, SOFT_REQ_REG);
if (!cpu_is_omap15xx())
omap_writew(0, SOFT_REQ_REG2);
/* By default all idlect1 clocks are allowed to idle */
arm_idlect1_mask = ~0;
cpu_mask = 0;
if (cpu_is_omap1710())
cpu_mask |= CK_1710;
if (cpu_is_omap16xx())
cpu_mask |= CK_16XX;
if (cpu_is_omap1510())
cpu_mask |= CK_1510;
if (cpu_is_omap310())
cpu_mask |= CK_310;
/* Pointers to these clocks are needed by code in clock.c */
api_ck_p = &api_ck.clk;
ck_dpll1_p = &ck_dpll1;
ck_ref_p = &ck_ref;
pr_info("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
omap_readw(ARM_CKCTL));
/* We want to be in synchronous scalable mode */
omap_writew(0x1000, ARM_SYSST);
/*
* Initially use the values set by bootloader. Determine PLL rate and
* recalculate dependent clocks as if kernel had changed PLL or
* divisors. See also omap1_clk_late_init() that can reprogram dpll1
* after the SRAM is initialized.
*/
{
unsigned pll_ctl_val = omap_readw(DPLL_CTL);
ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
if (pll_ctl_val & 0x10) {
/* PLL enabled, apply multiplier and divisor */
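			/*
			 * e.g. DPLL_CTL = 0x2290: mult = (0x2290 & 0xf80) >> 7
			 * = 5, div = ((0x2290 & 0x60) >> 5) + 1 = 1, so a
			 * 12 MHz xtal gives 12 * 5 / 1 = 60 MHz.
			 */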
if (pll_ctl_val & 0xf80)
ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
} else {
/* PLL disabled, apply bypass divisor */
switch (pll_ctl_val & 0xc) {
case 0:
break;
case 0x4:
ck_dpll1.rate /= 2;
break;
default:
ck_dpll1.rate /= 4;
break;
}
}
}
/* Amstrad Delta wants BCLK high when inactive */
if (machine_is_ams_delta())
omap_writel(omap_readl(ULPD_CLOCK_CTRL) |
(1 << SDW_MCLK_INV_BIT),
ULPD_CLOCK_CTRL);
/* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
/* Put DSP/MPUI into reset until needed */
omap_writew(0, ARM_RSTCT1);
omap_writew(1, ARM_RSTCT2);
omap_writew(0x400, ARM_IDLECT1);
/*
* According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
* of the ARM_IDLECT2 register must be set to zero. The power-on
* default value of this bit is one.
*/
omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++) {
if (!(c->cpu & cpu_mask))
continue;
if (c->lk.clk_hw->init) { /* NULL if provider already registered */
const struct clk_init_data *init = c->lk.clk_hw->init;
const char *name = c->lk.clk_hw->init->name;
int err;
err = clk_hw_register(NULL, c->lk.clk_hw);
if (err < 0) {
pr_err("failed to register clock \"%s\"! (%d)\n", name, err);
/* may be tried again, restore init data */
c->lk.clk_hw->init = init;
continue;
}
}
clk_hw_register_clkdev(c->lk.clk_hw, c->lk.con_id, c->lk.dev_id);
}
omap1_show_rates();
return 0;
}
#define OMAP1_DPLL1_SANE_VALUE 60000000
void __init omap1_clk_late_init(void)
{
unsigned long rate = ck_dpll1.rate;
/* Find the highest supported frequency and enable it */
if (omap1_select_table_rate(&virtual_ck_mpu, ~0, arm_ck.rate)) {
pr_err("System frequencies not set, using default. Check your config.\n");
/*
* Reprogramming the DPLL is tricky, it must be done from SRAM.
*/
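		/*
		 * 0x2290 encodes PLL on, x5 / 1, i.e. 12 MHz * 5 = 60 MHz,
		 * matching OMAP1_DPLL1_SANE_VALUE; 0x0005 is presumably the
		 * companion ARM_CKCTL divisor setting.
		 */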
omap_sram_reprogram_clock(0x2290, 0x0005);
ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
}
propagate_rate(&ck_dpll1);
omap1_show_rates();
loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
}
| linux-master | arch/arm/mach-omap1/clock_data.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP1/OMAP7xx - specific DMA driver
*
* Copyright (C) 2003 - 2008 Nokia Corporation
* Author: Juha Yrjölä <[email protected]>
* DMA channel linking for 1610 by Samuel Ortiz <[email protected]>
 * Graphics DMA and LCD DMA graphics transformations
* by Imre Deak <[email protected]>
* OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
* Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
*
* Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
* Converted DMA library into platform driver
* - G, Manjunath Kondaiah <[email protected]>
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include "tc.h"
#include "soc.h"
#define OMAP1_DMA_BASE (0xfffed800)
static u32 enable_1510_mode;
static const struct omap_dma_reg reg_map[] = {
[GCR] = { 0x0400, 0x00, OMAP_DMA_REG_16BIT },
[GSCR] = { 0x0404, 0x00, OMAP_DMA_REG_16BIT },
[GRST1] = { 0x0408, 0x00, OMAP_DMA_REG_16BIT },
[HW_ID] = { 0x0442, 0x00, OMAP_DMA_REG_16BIT },
[PCH2_ID] = { 0x0444, 0x00, OMAP_DMA_REG_16BIT },
[PCH0_ID] = { 0x0446, 0x00, OMAP_DMA_REG_16BIT },
[PCH1_ID] = { 0x0448, 0x00, OMAP_DMA_REG_16BIT },
[PCHG_ID] = { 0x044a, 0x00, OMAP_DMA_REG_16BIT },
[PCHD_ID] = { 0x044c, 0x00, OMAP_DMA_REG_16BIT },
[CAPS_0] = { 0x044e, 0x00, OMAP_DMA_REG_2X16BIT },
[CAPS_1] = { 0x0452, 0x00, OMAP_DMA_REG_2X16BIT },
[CAPS_2] = { 0x0456, 0x00, OMAP_DMA_REG_16BIT },
[CAPS_3] = { 0x0458, 0x00, OMAP_DMA_REG_16BIT },
[CAPS_4] = { 0x045a, 0x00, OMAP_DMA_REG_16BIT },
[PCH2_SR] = { 0x0460, 0x00, OMAP_DMA_REG_16BIT },
[PCH0_SR] = { 0x0480, 0x00, OMAP_DMA_REG_16BIT },
[PCH1_SR] = { 0x0482, 0x00, OMAP_DMA_REG_16BIT },
[PCHD_SR] = { 0x04c0, 0x00, OMAP_DMA_REG_16BIT },
/* Common Registers */
[CSDP] = { 0x0000, 0x40, OMAP_DMA_REG_16BIT },
[CCR] = { 0x0002, 0x40, OMAP_DMA_REG_16BIT },
[CICR] = { 0x0004, 0x40, OMAP_DMA_REG_16BIT },
[CSR] = { 0x0006, 0x40, OMAP_DMA_REG_16BIT },
[CEN] = { 0x0010, 0x40, OMAP_DMA_REG_16BIT },
[CFN] = { 0x0012, 0x40, OMAP_DMA_REG_16BIT },
[CSFI] = { 0x0014, 0x40, OMAP_DMA_REG_16BIT },
[CSEI] = { 0x0016, 0x40, OMAP_DMA_REG_16BIT },
[CPC] = { 0x0018, 0x40, OMAP_DMA_REG_16BIT }, /* 15xx only */
[CSAC] = { 0x0018, 0x40, OMAP_DMA_REG_16BIT },
[CDAC] = { 0x001a, 0x40, OMAP_DMA_REG_16BIT },
[CDEI] = { 0x001c, 0x40, OMAP_DMA_REG_16BIT },
[CDFI] = { 0x001e, 0x40, OMAP_DMA_REG_16BIT },
[CLNK_CTRL] = { 0x0028, 0x40, OMAP_DMA_REG_16BIT },
/* Channel specific register offsets */
[CSSA] = { 0x0008, 0x40, OMAP_DMA_REG_2X16BIT },
[CDSA] = { 0x000c, 0x40, OMAP_DMA_REG_2X16BIT },
[COLOR] = { 0x0020, 0x40, OMAP_DMA_REG_2X16BIT },
[CCR2] = { 0x0024, 0x40, OMAP_DMA_REG_16BIT },
[LCH_CTRL] = { 0x002a, 0x40, OMAP_DMA_REG_16BIT },
};
static struct resource res[] __initdata = {
[0] = {
.start = OMAP1_DMA_BASE,
.end = OMAP1_DMA_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "0",
.start = INT_DMA_CH0_6,
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = "1",
.start = INT_DMA_CH1_7,
.flags = IORESOURCE_IRQ,
},
[3] = {
.name = "2",
.start = INT_DMA_CH2_8,
.flags = IORESOURCE_IRQ,
},
[4] = {
.name = "3",
.start = INT_DMA_CH3,
.flags = IORESOURCE_IRQ,
},
[5] = {
.name = "4",
.start = INT_DMA_CH4,
.flags = IORESOURCE_IRQ,
},
[6] = {
.name = "5",
.start = INT_DMA_CH5,
.flags = IORESOURCE_IRQ,
},
/* Handled in lcd_dma.c */
[7] = {
.name = "6",
.start = INT_1610_DMA_CH6,
.flags = IORESOURCE_IRQ,
},
/* irq's for omap16xx and omap7xx */
[8] = {
.name = "7",
.start = INT_1610_DMA_CH7,
.flags = IORESOURCE_IRQ,
},
[9] = {
.name = "8",
.start = INT_1610_DMA_CH8,
.flags = IORESOURCE_IRQ,
},
[10] = {
.name = "9",
.start = INT_1610_DMA_CH9,
.flags = IORESOURCE_IRQ,
},
[11] = {
.name = "10",
.start = INT_1610_DMA_CH10,
.flags = IORESOURCE_IRQ,
},
[12] = {
.name = "11",
.start = INT_1610_DMA_CH11,
.flags = IORESOURCE_IRQ,
},
[13] = {
.name = "12",
.start = INT_1610_DMA_CH12,
.flags = IORESOURCE_IRQ,
},
[14] = {
.name = "13",
.start = INT_1610_DMA_CH13,
.flags = IORESOURCE_IRQ,
},
[15] = {
.name = "14",
.start = INT_1610_DMA_CH14,
.flags = IORESOURCE_IRQ,
},
[16] = {
.name = "15",
.start = INT_1610_DMA_CH15,
.flags = IORESOURCE_IRQ,
},
[17] = {
.name = "16",
.start = INT_DMA_LCD,
.flags = IORESOURCE_IRQ,
},
};
static void __iomem *dma_base;
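/*
 * Registers tagged OMAP_DMA_REG_2X16BIT (CSSA, CDSA, COLOR, CAPS_0/1) are
 * 32-bit values exposed as two adjacent 16-bit halves; the accessors below
 * handle the low half at the mapped offset and the high half at offset + 2.
 */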
static inline void dma_write(u32 val, int reg, int lch)
{
void __iomem *addr = dma_base;
addr += reg_map[reg].offset;
addr += reg_map[reg].stride * lch;
__raw_writew(val, addr);
if (reg_map[reg].type == OMAP_DMA_REG_2X16BIT)
__raw_writew(val >> 16, addr + 2);
}
static inline u32 dma_read(int reg, int lch)
{
void __iomem *addr = dma_base;
uint32_t val;
addr += reg_map[reg].offset;
addr += reg_map[reg].stride * lch;
val = __raw_readw(addr);
if (reg_map[reg].type == OMAP_DMA_REG_2X16BIT)
val |= __raw_readw(addr + 2) << 16;
return val;
}
static void omap1_clear_lch_regs(int lch)
{
int i;
	for (i = CPC; i <= COLOR; i++)
dma_write(0, i, lch);
}
static void omap1_clear_dma(int lch)
{
u32 l;
l = dma_read(CCR, lch);
l &= ~OMAP_DMA_CCR_EN;
dma_write(l, CCR, lch);
/* Clear pending interrupts */
l = dma_read(CSR, lch);
}
static void omap1_show_dma_caps(void)
{
if (enable_1510_mode) {
printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
} else {
u16 w;
printk(KERN_INFO "OMAP DMA hardware version %d\n",
dma_read(HW_ID, 0));
printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
dma_read(CAPS_4, 0));
/* Disable OMAP 3.0/3.1 compatibility mode. */
w = dma_read(GSCR, 0);
w |= 1 << 3;
dma_write(w, GSCR, 0);
}
}
static unsigned configure_dma_errata(void)
{
unsigned errata = 0;
/*
* Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
* read before the DMA controller finished disabling the channel.
*/
if (!cpu_is_omap15xx())
SET_DMA_ERRATA(DMA_ERRATA_3_3);
return errata;
}
static const struct platform_device_info omap_dma_dev_info = {
.name = "omap-dma-engine",
.id = -1,
.dma_mask = DMA_BIT_MASK(32),
.res = res,
.num_res = 1,
};
/* OMAP1510, OMAP1610*/
static const struct dma_slave_map omap1xxx_sdma_map[] = {
{ "omap-mcbsp.1", "tx", SDMA_FILTER_PARAM(8) },
{ "omap-mcbsp.1", "rx", SDMA_FILTER_PARAM(9) },
{ "omap-mcbsp.3", "tx", SDMA_FILTER_PARAM(10) },
{ "omap-mcbsp.3", "rx", SDMA_FILTER_PARAM(11) },
{ "omap-mcbsp.2", "tx", SDMA_FILTER_PARAM(16) },
{ "omap-mcbsp.2", "rx", SDMA_FILTER_PARAM(17) },
{ "mmci-omap.0", "tx", SDMA_FILTER_PARAM(21) },
{ "mmci-omap.0", "rx", SDMA_FILTER_PARAM(22) },
{ "omap_udc", "rx0", SDMA_FILTER_PARAM(26) },
{ "omap_udc", "rx1", SDMA_FILTER_PARAM(27) },
{ "omap_udc", "rx2", SDMA_FILTER_PARAM(28) },
{ "omap_udc", "tx0", SDMA_FILTER_PARAM(29) },
{ "omap_udc", "tx1", SDMA_FILTER_PARAM(30) },
{ "omap_udc", "tx2", SDMA_FILTER_PARAM(31) },
{ "mmci-omap.1", "tx", SDMA_FILTER_PARAM(54) },
{ "mmci-omap.1", "rx", SDMA_FILTER_PARAM(55) },
};
static struct omap_system_dma_plat_info dma_plat_info __initdata = {
.reg_map = reg_map,
.channel_stride = 0x40,
.show_dma_caps = omap1_show_dma_caps,
.clear_lch_regs = omap1_clear_lch_regs,
.clear_dma = omap1_clear_dma,
.dma_write = dma_write,
.dma_read = dma_read,
};
static int __init omap1_system_dma_init(void)
{
struct omap_system_dma_plat_info p;
struct omap_dma_dev_attr *d;
struct platform_device *pdev, *dma_pdev;
int ret;
pdev = platform_device_alloc("omap_dma_system", 0);
if (!pdev) {
pr_err("%s: Unable to device alloc for dma\n",
__func__);
return -ENOMEM;
}
dma_base = ioremap(res[0].start, resource_size(&res[0]));
if (!dma_base) {
pr_err("%s: Unable to ioremap\n", __func__);
ret = -ENODEV;
goto exit_device_put;
}
ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
if (ret) {
dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
__func__, pdev->name, pdev->id);
goto exit_iounmap;
}
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
ret = -ENOMEM;
goto exit_iounmap;
}
/* Valid attributes for omap1 plus processors */
if (cpu_is_omap15xx())
d->dev_caps = ENABLE_1510_MODE;
enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
if (cpu_is_omap16xx())
d->dev_caps = ENABLE_16XX_MODE;
d->dev_caps |= SRC_PORT;
d->dev_caps |= DST_PORT;
d->dev_caps |= SRC_INDEX;
d->dev_caps |= DST_INDEX;
d->dev_caps |= IS_BURST_ONLY4;
d->dev_caps |= CLEAR_CSR_ON_READ;
d->dev_caps |= IS_WORD_16;
/* available logical channels */
if (cpu_is_omap15xx()) {
d->lch_count = 9;
} else {
if (d->dev_caps & ENABLE_1510_MODE)
d->lch_count = 9;
else
d->lch_count = 16;
}
p = dma_plat_info;
p.dma_attr = d;
p.errata = configure_dma_errata();
p.slave_map = omap1xxx_sdma_map;
p.slavecnt = ARRAY_SIZE(omap1xxx_sdma_map);
ret = platform_device_add_data(pdev, &p, sizeof(p));
if (ret) {
		dev_err(&pdev->dev, "%s: Unable to add platform data for %s%d\n",
			__func__, pdev->name, pdev->id);
goto exit_release_d;
}
ret = platform_device_add(pdev);
if (ret) {
		dev_err(&pdev->dev, "%s: Unable to add platform device %s%d\n",
			__func__, pdev->name, pdev->id);
goto exit_release_d;
}
dma_pdev = platform_device_register_full(&omap_dma_dev_info);
if (IS_ERR(dma_pdev)) {
ret = PTR_ERR(dma_pdev);
goto exit_release_pdev;
}
return ret;
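/*
 * Error unwind: the labels below fall through in reverse order of
 * setup, so a failure after platform_device_add() first undoes the
 * add with platform_device_del(), then frees the attribute data and
 * unmaps the registers, ending with platform_device_put() on the
 * allocated device.
 */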
exit_release_pdev:
platform_device_del(pdev);
exit_release_d:
kfree(d);
exit_iounmap:
iounmap(dma_base);
exit_device_put:
platform_device_put(pdev);
return ret;
}
arch_initcall(omap1_system_dma_init);
| linux-master | arch/arm/mach-omap1/dma.c |
/*
* linux/arch/arm/mach-omap1/pm.c
*
* OMAP Power Management Routines
*
* Original code for the SA11x0:
* Copyright (c) 2001 Cliff Brake <[email protected]>
*
* Modified for the PXA250 by Nicolas Pitre:
* Copyright (c) 2002 Monta Vista Software, Inc.
*
* Modified for the OMAP1510 by David Singleton:
* Copyright (c) 2002 Monta Vista Software, Inc.
*
* Cleanup 2004 for OMAP1510/1610 by Dirk Behme <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <linux/cpu.h>
#include <asm/fncpy.h>
#include <asm/system_misc.h>
#include <asm/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <linux/soc/ti/omap1-io.h>
#include "tc.h"
#include <linux/omap-dma.h>
#include <clocksource/timer-ti-dm.h>
#include "hardware.h"
#include "mux.h"
#include "irqs.h"
#include "iomap.h"
#include "clock.h"
#include "pm.h"
#include "soc.h"
#include "sram.h"
static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE];
static unsigned short dsp_sleep_save[DSP_SLEEP_SAVE_SIZE];
static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE];
static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE];
static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
static unsigned short enable_dyn_sleep;
static ssize_t idle_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%hu\n", enable_dyn_sleep);
}
static ssize_t idle_store(struct kobject *kobj, struct kobj_attribute *attr,
const char * buf, size_t n)
{
unsigned short value;
if (sscanf(buf, "%hu", &value) != 1 ||
(value != 0 && value != 1) ||
(value != 0 && !IS_ENABLED(CONFIG_OMAP_32K_TIMER))) {
pr_err("idle_sleep_store: Invalid value\n");
return -EINVAL;
}
enable_dyn_sleep = value;
return n;
}
static struct kobj_attribute sleep_while_idle_attr =
__ATTR(sleep_while_idle, 0644, idle_show, idle_store);
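/*
 * Usage sketch: since omap_pm_init() below creates this attribute on
 * power_kobj, it shows up under /sys/power:
 *
 *	echo 1 > /sys/power/sleep_while_idle	(allow deep sleep in idle)
 *	cat /sys/power/sleep_while_idle
 */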
static void (*omap_sram_suspend)(unsigned long r0, unsigned long r1) = NULL;
/*
* Let's power down on idle, but only if we are really
* idle, because once we start down the path of
* going idle we continue to do idle even if we get
* a clock tick interrupt . .
*/
void omap1_pm_idle(void)
{
extern __u32 arm_idlect1_mask;
__u32 use_idlect1 = arm_idlect1_mask;
local_fiq_disable();
#if defined(CONFIG_OMAP_MPU_TIMER) && !defined(CONFIG_OMAP_DM_TIMER)
use_idlect1 = use_idlect1 & ~(1 << 9);
#endif
#ifdef CONFIG_OMAP_DM_TIMER
use_idlect1 = omap_dm_timer_modify_idlect_mask(use_idlect1);
#endif
if (omap_dma_running())
use_idlect1 &= ~(1 << 6);
/*
* We should be able to remove the do_sleep variable and multiple
* tests above as soon as drivers, timer and DMA code have been fixed.
* Even the sleep block count should become obsolete.
*/
if ((use_idlect1 != ~0) || !enable_dyn_sleep) {
__u32 saved_idlect1 = omap_readl(ARM_IDLECT1);
if (cpu_is_omap15xx())
use_idlect1 &= OMAP1510_BIG_SLEEP_REQUEST;
else
use_idlect1 &= OMAP1610_IDLECT1_SLEEP_VAL;
omap_writel(use_idlect1, ARM_IDLECT1);
__asm__ volatile ("mcr p15, 0, r0, c7, c0, 4"); /* wait for interrupt */
omap_writel(saved_idlect1, ARM_IDLECT1);
local_fiq_enable();
return;
}
omap_sram_suspend(omap_readl(ARM_IDLECT1),
omap_readl(ARM_IDLECT2));
local_fiq_enable();
}
/*
* Configuration of the wakeup event is board specific. For the
* moment we put it into this helper function. Later it may move
* to board specific files.
*/
static void omap_pm_wakeup_setup(void)
{
u32 level1_wake = 0;
u32 level2_wake = OMAP_IRQ_BIT(INT_UART2);
/*
* Turn off all interrupts except GPIO bank 1, L1-2nd level cascade,
* and the L2 wakeup interrupts: keypad and UART2. Note that the
* drivers must still separately call omap_set_gpio_wakeup() to
* wake up to a GPIO interrupt.
*/
if (cpu_is_omap15xx())
level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
OMAP_IRQ_BIT(INT_1510_IH2_IRQ);
else if (cpu_is_omap16xx())
level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
OMAP_IRQ_BIT(INT_1610_IH2_IRQ);
omap_writel(~level1_wake, OMAP_IH1_MIR);
if (cpu_is_omap15xx()) {
level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
omap_writel(~level2_wake, OMAP_IH2_MIR);
} else if (cpu_is_omap16xx()) {
level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
omap_writel(~level2_wake, OMAP_IH2_0_MIR);
/* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ),
OMAP_IH2_1_MIR);
omap_writel(~0x0, OMAP_IH2_2_MIR);
omap_writel(~0x0, OMAP_IH2_3_MIR);
}
/* New IRQ agreement, recalculate in cascade order */
omap_writel(1, OMAP_IH2_CONTROL);
omap_writel(1, OMAP_IH1_CONTROL);
}
#define EN_DSPCK 13 /* ARM_CKCTL */
#define EN_APICK 6 /* ARM_IDLECT2 */
#define DSP_EN 1 /* ARM_RSTCT1 */
void omap1_pm_suspend(void)
{
unsigned long arg0 = 0, arg1 = 0;
printk(KERN_INFO "PM: OMAP%x is trying to enter deep sleep...\n",
omap_rev());
omap_serial_wake_trigger(1);
if (!cpu_is_omap15xx())
omap_writew(0xffff, ULPD_SOFT_DISABLE_REQ_REG);
/*
* Step 1: turn off interrupts (FIXME: NOTE: already disabled)
*/
local_irq_disable();
local_fiq_disable();
/*
* Step 2: save registers
*
* The omap is a strange/beautiful device. The caches, memory
* and register state are preserved across power saves.
* We have to save and restore very little register state to
* idle the omap.
*
* Save interrupt, MPUI, ARM and ULPD control registers.
*/
if (cpu_is_omap15xx()) {
MPUI1510_SAVE(OMAP_IH1_MIR);
MPUI1510_SAVE(OMAP_IH2_MIR);
MPUI1510_SAVE(MPUI_CTRL);
MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG);
MPUI1510_SAVE(MPUI_DSP_API_CONFIG);
MPUI1510_SAVE(EMIFS_CONFIG);
MPUI1510_SAVE(EMIFF_SDRAM_CONFIG);
} else if (cpu_is_omap16xx()) {
MPUI1610_SAVE(OMAP_IH1_MIR);
MPUI1610_SAVE(OMAP_IH2_0_MIR);
MPUI1610_SAVE(OMAP_IH2_1_MIR);
MPUI1610_SAVE(OMAP_IH2_2_MIR);
MPUI1610_SAVE(OMAP_IH2_3_MIR);
MPUI1610_SAVE(MPUI_CTRL);
MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG);
MPUI1610_SAVE(MPUI_DSP_API_CONFIG);
MPUI1610_SAVE(EMIFS_CONFIG);
MPUI1610_SAVE(EMIFF_SDRAM_CONFIG);
}
ARM_SAVE(ARM_CKCTL);
ARM_SAVE(ARM_IDLECT1);
ARM_SAVE(ARM_IDLECT2);
if (!(cpu_is_omap15xx()))
ARM_SAVE(ARM_IDLECT3);
ARM_SAVE(ARM_EWUPCT);
ARM_SAVE(ARM_RSTCT1);
ARM_SAVE(ARM_RSTCT2);
ARM_SAVE(ARM_SYSST);
ULPD_SAVE(ULPD_CLOCK_CTRL);
ULPD_SAVE(ULPD_STATUS_REQ);
/* (Step 3 removed - we now allow deep sleep by default) */
/*
* Step 4: OMAP DSP Shutdown
*/
/* stop DSP */
omap_writew(omap_readw(ARM_RSTCT1) & ~(1 << DSP_EN), ARM_RSTCT1);
/* shut down dsp_ck */
omap_writew(omap_readw(ARM_CKCTL) & ~(1 << EN_DSPCK), ARM_CKCTL);
/* temporarily enabling api_ck to access DSP registers */
omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2);
/* save DSP registers */
DSP_SAVE(DSP_IDLECT2);
/* Stop all DSP domain clocks */
__raw_writew(0, DSP_IDLECT2);
/*
* Step 5: Wakeup Event Setup
*/
omap_pm_wakeup_setup();
/*
* Step 6: ARM and Traffic controller shutdown
*/
/* disable ARM watchdog */
omap_writel(0x00F5, OMAP_WDT_TIMER_MODE);
omap_writel(0x00A0, OMAP_WDT_TIMER_MODE);
/*
* Step 6b: ARM and Traffic controller shutdown
*
* Step 6 continues here. Prepare jump to power management
* assembly code in internal SRAM.
*
* Since the omap_cpu_suspend routine has been copied to
* SRAM, we'll do an indirect procedure call to it, passing the
* contents of arm_idlect1 and arm_idlect2 so it can restore
* them on wakeup before returning.
*/
arg0 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT1];
arg1 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT2];
/*
* Step 6c: ARM and Traffic controller shutdown
*
* Jump to assembly code. The processor will stay there
* until wake up.
*/
omap_sram_suspend(arg0, arg1);
/*
* If we are here, processor is woken up!
*/
/*
* Restore DSP clocks
*/
/* again temporarily enabling api_ck to access DSP registers */
omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2);
/* Restore DSP domain clocks */
DSP_RESTORE(DSP_IDLECT2);
/*
* Restore ARM state, except ARM_IDLECT1/2 which omap_cpu_suspend did
*/
if (!(cpu_is_omap15xx()))
ARM_RESTORE(ARM_IDLECT3);
ARM_RESTORE(ARM_CKCTL);
ARM_RESTORE(ARM_EWUPCT);
ARM_RESTORE(ARM_RSTCT1);
ARM_RESTORE(ARM_RSTCT2);
ARM_RESTORE(ARM_SYSST);
ULPD_RESTORE(ULPD_CLOCK_CTRL);
ULPD_RESTORE(ULPD_STATUS_REQ);
if (cpu_is_omap15xx()) {
MPUI1510_RESTORE(MPUI_CTRL);
MPUI1510_RESTORE(MPUI_DSP_BOOT_CONFIG);
MPUI1510_RESTORE(MPUI_DSP_API_CONFIG);
MPUI1510_RESTORE(EMIFS_CONFIG);
MPUI1510_RESTORE(EMIFF_SDRAM_CONFIG);
MPUI1510_RESTORE(OMAP_IH1_MIR);
MPUI1510_RESTORE(OMAP_IH2_MIR);
} else if (cpu_is_omap16xx()) {
MPUI1610_RESTORE(MPUI_CTRL);
MPUI1610_RESTORE(MPUI_DSP_BOOT_CONFIG);
MPUI1610_RESTORE(MPUI_DSP_API_CONFIG);
MPUI1610_RESTORE(EMIFS_CONFIG);
MPUI1610_RESTORE(EMIFF_SDRAM_CONFIG);
MPUI1610_RESTORE(OMAP_IH1_MIR);
MPUI1610_RESTORE(OMAP_IH2_0_MIR);
MPUI1610_RESTORE(OMAP_IH2_1_MIR);
MPUI1610_RESTORE(OMAP_IH2_2_MIR);
MPUI1610_RESTORE(OMAP_IH2_3_MIR);
}
if (!cpu_is_omap15xx())
omap_writew(0, ULPD_SOFT_DISABLE_REQ_REG);
/*
* Re-enable interrupts
*/
local_irq_enable();
local_fiq_enable();
omap_serial_wake_trigger(0);
printk(KERN_INFO "PM: OMAP%x is re-starting from deep sleep...\n",
omap_rev());
}
#ifdef CONFIG_DEBUG_FS
/*
* Read system PM registers for debugging
*/
static int omap_pm_debug_show(struct seq_file *m, void *v)
{
ARM_SAVE(ARM_CKCTL);
ARM_SAVE(ARM_IDLECT1);
ARM_SAVE(ARM_IDLECT2);
if (!(cpu_is_omap15xx()))
ARM_SAVE(ARM_IDLECT3);
ARM_SAVE(ARM_EWUPCT);
ARM_SAVE(ARM_RSTCT1);
ARM_SAVE(ARM_RSTCT2);
ARM_SAVE(ARM_SYSST);
ULPD_SAVE(ULPD_IT_STATUS);
ULPD_SAVE(ULPD_CLOCK_CTRL);
ULPD_SAVE(ULPD_SOFT_REQ);
ULPD_SAVE(ULPD_STATUS_REQ);
ULPD_SAVE(ULPD_DPLL_CTRL);
ULPD_SAVE(ULPD_POWER_CTRL);
if (cpu_is_omap15xx()) {
MPUI1510_SAVE(MPUI_CTRL);
MPUI1510_SAVE(MPUI_DSP_STATUS);
MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG);
MPUI1510_SAVE(MPUI_DSP_API_CONFIG);
MPUI1510_SAVE(EMIFF_SDRAM_CONFIG);
MPUI1510_SAVE(EMIFS_CONFIG);
} else if (cpu_is_omap16xx()) {
MPUI1610_SAVE(MPUI_CTRL);
MPUI1610_SAVE(MPUI_DSP_STATUS);
MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG);
MPUI1610_SAVE(MPUI_DSP_API_CONFIG);
MPUI1610_SAVE(EMIFF_SDRAM_CONFIG);
MPUI1610_SAVE(EMIFS_CONFIG);
}
seq_printf(m,
"ARM_CKCTL_REG: 0x%-8x \n"
"ARM_IDLECT1_REG: 0x%-8x \n"
"ARM_IDLECT2_REG: 0x%-8x \n"
"ARM_IDLECT3_REG: 0x%-8x \n"
"ARM_EWUPCT_REG: 0x%-8x \n"
"ARM_RSTCT1_REG: 0x%-8x \n"
"ARM_RSTCT2_REG: 0x%-8x \n"
"ARM_SYSST_REG: 0x%-8x \n"
"ULPD_IT_STATUS_REG: 0x%-4x \n"
"ULPD_CLOCK_CTRL_REG: 0x%-4x \n"
"ULPD_SOFT_REQ_REG: 0x%-4x \n"
"ULPD_DPLL_CTRL_REG: 0x%-4x \n"
"ULPD_STATUS_REQ_REG: 0x%-4x \n"
"ULPD_POWER_CTRL_REG: 0x%-4x \n",
ARM_SHOW(ARM_CKCTL),
ARM_SHOW(ARM_IDLECT1),
ARM_SHOW(ARM_IDLECT2),
ARM_SHOW(ARM_IDLECT3),
ARM_SHOW(ARM_EWUPCT),
ARM_SHOW(ARM_RSTCT1),
ARM_SHOW(ARM_RSTCT2),
ARM_SHOW(ARM_SYSST),
ULPD_SHOW(ULPD_IT_STATUS),
ULPD_SHOW(ULPD_CLOCK_CTRL),
ULPD_SHOW(ULPD_SOFT_REQ),
ULPD_SHOW(ULPD_DPLL_CTRL),
ULPD_SHOW(ULPD_STATUS_REQ),
ULPD_SHOW(ULPD_POWER_CTRL));
if (cpu_is_omap15xx()) {
seq_printf(m,
"MPUI1510_CTRL_REG 0x%-8x \n"
"MPUI1510_DSP_STATUS_REG: 0x%-8x \n"
"MPUI1510_DSP_BOOT_CONFIG_REG: 0x%-8x \n"
"MPUI1510_DSP_API_CONFIG_REG: 0x%-8x \n"
"MPUI1510_SDRAM_CONFIG_REG: 0x%-8x \n"
"MPUI1510_EMIFS_CONFIG_REG: 0x%-8x \n",
MPUI1510_SHOW(MPUI_CTRL),
MPUI1510_SHOW(MPUI_DSP_STATUS),
MPUI1510_SHOW(MPUI_DSP_BOOT_CONFIG),
MPUI1510_SHOW(MPUI_DSP_API_CONFIG),
MPUI1510_SHOW(EMIFF_SDRAM_CONFIG),
MPUI1510_SHOW(EMIFS_CONFIG));
} else if (cpu_is_omap16xx()) {
seq_printf(m,
"MPUI1610_CTRL_REG 0x%-8x \n"
"MPUI1610_DSP_STATUS_REG: 0x%-8x \n"
"MPUI1610_DSP_BOOT_CONFIG_REG: 0x%-8x \n"
"MPUI1610_DSP_API_CONFIG_REG: 0x%-8x \n"
"MPUI1610_SDRAM_CONFIG_REG: 0x%-8x \n"
"MPUI1610_EMIFS_CONFIG_REG: 0x%-8x \n",
MPUI1610_SHOW(MPUI_CTRL),
MPUI1610_SHOW(MPUI_DSP_STATUS),
MPUI1610_SHOW(MPUI_DSP_BOOT_CONFIG),
MPUI1610_SHOW(MPUI_DSP_API_CONFIG),
MPUI1610_SHOW(EMIFF_SDRAM_CONFIG),
MPUI1610_SHOW(EMIFS_CONFIG));
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(omap_pm_debug);
static void omap_pm_init_debugfs(void)
{
struct dentry *d;
d = debugfs_create_dir("pm_debug", NULL);
debugfs_create_file("omap_pm", S_IWUSR | S_IRUGO, d, NULL,
&omap_pm_debug_fops);
}
#endif /* CONFIG_DEBUG_FS */
/*
 * omap_pm_prepare - Do preliminary suspend work.
 */
static int omap_pm_prepare(void)
{
/* We cannot sleep in idle until we have resumed */
cpu_idle_poll_ctrl(true);
return 0;
}
/*
 * omap_pm_enter - Actually enter a sleep state.
 * @state: State we're entering.
 */
static int omap_pm_enter(suspend_state_t state)
{
switch (state)
{
case PM_SUSPEND_MEM:
omap1_pm_suspend();
break;
default:
return -EINVAL;
}
return 0;
}
/**
* omap_pm_finish - Finish up suspend sequence.
*
* This is called after we wake back up (or if entering the sleep state
* failed).
*/
static void omap_pm_finish(void)
{
cpu_idle_poll_ctrl(false);
}
static irqreturn_t omap_wakeup_interrupt(int irq, void *dev)
{
return IRQ_HANDLED;
}
static const struct platform_suspend_ops omap_pm_ops = {
.prepare = omap_pm_prepare,
.enter = omap_pm_enter,
.finish = omap_pm_finish,
.valid = suspend_valid_only_mem,
};
static int __init omap_pm_init(void)
{
int error = 0;
int irq;
if (!cpu_class_is_omap1())
return -ENODEV;
pr_info("Power Management for TI OMAP.\n");
if (!IS_ENABLED(CONFIG_OMAP_32K_TIMER))
pr_info("OMAP1 PM: sleep states in idle disabled due to no 32KiHz timer\n");
if (!IS_ENABLED(CONFIG_OMAP_DM_TIMER))
pr_info("OMAP1 PM: sleep states in idle disabled due to no DMTIMER support\n");
if (IS_ENABLED(CONFIG_OMAP_32K_TIMER) &&
IS_ENABLED(CONFIG_OMAP_DM_TIMER)) {
/* OMAP16xx only */
pr_info("OMAP1 PM: sleep states in idle enabled\n");
enable_dyn_sleep = 1;
}
/*
* We copy the assembler sleep/wakeup routines to SRAM.
* These routines need to be in SRAM as that's the only
* memory the MPU can see when it wakes up.
*/
if (cpu_is_omap15xx()) {
omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend,
omap1510_cpu_suspend_sz);
} else if (cpu_is_omap16xx()) {
omap_sram_suspend = omap_sram_push(omap1610_cpu_suspend,
omap1610_cpu_suspend_sz);
}
if (omap_sram_suspend == NULL) {
printk(KERN_ERR "PM not initialized: Missing SRAM support\n");
return -ENODEV;
}
arm_pm_idle = omap1_pm_idle;
if (cpu_is_omap16xx())
irq = INT_1610_WAKE_UP_REQ;
else
irq = -1;
if (irq >= 0) {
if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
}
/*
 * Program new power ramp-up time
 * (0 for most boards since we don't lower voltage when in deep sleep)
 */
omap_writew(ULPD_SETUP_ANALOG_CELL_3_VAL, ULPD_SETUP_ANALOG_CELL_3);
/* Setup ULPD POWER_CTRL_REG - enter deep sleep whenever possible */
omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL);
/* Configure IDLECT3 */
if (cpu_is_omap16xx())
omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
suspend_set_ops(&omap_pm_ops);
#ifdef CONFIG_DEBUG_FS
omap_pm_init_debugfs();
#endif
error = sysfs_create_file(power_kobj, &sleep_while_idle_attr.attr);
if (error)
pr_err("sysfs_create_file failed: %d\n", error);
if (cpu_is_omap16xx()) {
/* configure LOW_PWR pin */
omap_cfg_reg(T20_1610_LOW_PWR);
}
return error;
}
__initcall(omap_pm_init);
| linux-master | arch/arm/mach-omap1/pm.c |
/*
* linux/arch/arm/mach-omap1/timer32k.c
*
* OMAP 32K Timer
*
* Copyright (C) 2004 - 2005 Nokia Corporation
* Partial timer rewrite and additional dynamic tick timer support by
* Tony Lindgren <[email protected]> and
* Tuukka Tikkanen <[email protected]>
* OMAP Dual-mode timer framework support by Timo Teras
*
* MPU timer code based on the older MPU timer code for OMAP
* Copyright (C) 2000 RidgeRun, Inc.
* Author: Greg Lonnon <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/sched_clock.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include "hardware.h"
#include "common.h"
/*
* ---------------------------------------------------------------------------
* 32KHz OS timer
*
* This currently works only on 16xx, as 1510 does not have the continuous
* 32KHz synchronous timer. The 32KHz synchronous timer is used to keep track
* of time in addition to the 32KHz OS timer. Using only the 32KHz OS timer
* on 1510 would be possible, but the timer would not be as accurate as
* with the 32KHz synchronous timer.
* ---------------------------------------------------------------------------
*/
/* 16xx specific defines */
#define OMAP1_32K_TIMER_BASE 0xfffb9000
#define OMAP1_32KSYNC_TIMER_BASE 0xfffbc400
#define OMAP1_32K_TIMER_CR 0x08
#define OMAP1_32K_TIMER_TVR 0x00
#define OMAP1_32K_TIMER_TCR 0x04
#define OMAP_32K_TICKS_PER_SEC (32768)
/*
* TRM says 1 / HZ = (TVR + 1) / 32768, so TVR = (32768 / HZ) - 1;
* with HZ = 128, TVR = 255.
*/
#define OMAP_32K_TIMER_TICK_PERIOD ((OMAP_32K_TICKS_PER_SEC / HZ) - 1)
#define JIFFIES_TO_HW_TICKS(nr_jiffies, clock_rate) \
(((nr_jiffies) * (clock_rate)) / HZ)
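/*
 * Worked example (illustrative): with HZ = 100 and a 32768 Hz clock,
 * JIFFIES_TO_HW_TICKS(5, 32768) = (5 * 32768) / 100 = 1638 ticks,
 * i.e. 50 ms expressed in 32KHz timer ticks.
 */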
static inline void omap_32k_timer_write(int val, int reg)
{
omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
}
static inline void omap_32k_timer_start(unsigned long load_val)
{
if (!load_val)
load_val = 1;
omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR);
omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR);
}
static inline void omap_32k_timer_stop(void)
{
omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR);
}
#define omap_32k_timer_ack_irq()
static int omap_32k_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
omap_32k_timer_start(delta);
return 0;
}
static int omap_32k_timer_shutdown(struct clock_event_device *evt)
{
omap_32k_timer_stop();
return 0;
}
static int omap_32k_timer_set_periodic(struct clock_event_device *evt)
{
omap_32k_timer_stop();
omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
return 0;
}
static struct clock_event_device clockevent_32k_timer = {
.name = "32k-timer",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = omap_32k_timer_set_next_event,
.set_state_shutdown = omap_32k_timer_shutdown,
.set_state_periodic = omap_32k_timer_set_periodic,
.set_state_oneshot = omap_32k_timer_shutdown,
.tick_resume = omap_32k_timer_shutdown,
};
static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &clockevent_32k_timer;
omap_32k_timer_ack_irq();
evt->event_handler(evt);
return IRQ_HANDLED;
}
static __init void omap_init_32k_timer(void)
{
if (request_irq(INT_OS_TIMER, omap_32k_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "32KHz timer", NULL))
pr_err("Failed to request irq %d(32KHz timer)\n", INT_OS_TIMER);
clockevent_32k_timer.cpumask = cpumask_of(0);
clockevents_config_and_register(&clockevent_32k_timer,
OMAP_32K_TICKS_PER_SEC, 1, 0xfffffffe);
}
/* OMAP2_32KSYNCNT_CR_OFF: offset of 32ksync counter register */
#define OMAP2_32KSYNCNT_REV_OFF 0x0
#define OMAP2_32KSYNCNT_REV_SCHEME (0x3 << 30)
#define OMAP2_32KSYNCNT_CR_OFF_LOW 0x10
#define OMAP2_32KSYNCNT_CR_OFF_HIGH 0x30
/*
* 32KHz clocksource ... always available, on most chips except
* OMAP 730 and 1510. Other timers could be used as clocksources, with
* higher resolution in free-running counter modes (e.g. 12 MHz xtal),
* but systems won't necessarily want to spend resources that way.
*/
static void __iomem *sync32k_cnt_reg;
static u64 notrace omap_32k_read_sched_clock(void)
{
return sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
}
/**
* omap_read_persistent_clock64 - Return time from a persistent clock.
*
* Reads the time from a source which isn't disabled during PM, the
* 32k sync timer. Converts the cycles elapsed since the last read
* into nanoseconds and adds them to a monotonically increasing
* timespec64.
*/
static struct timespec64 persistent_ts;
static cycles_t cycles;
static unsigned int persistent_mult, persistent_shift;
static void omap_read_persistent_clock64(struct timespec64 *ts)
{
unsigned long long nsecs;
cycles_t last_cycles;
last_cycles = cycles;
cycles = sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
nsecs = clocksource_cyc2ns(cycles - last_cycles,
persistent_mult, persistent_shift);
timespec64_add_ns(&persistent_ts, nsecs);
*ts = persistent_ts;
}
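/*
 * Note (illustrative): clocksource_cyc2ns() computes
 * (cycles * mult) >> shift.  For the 32768 Hz source, mult and shift
 * are chosen by clocks_calc_mult_shift() below so that the result
 * approximates cycles * (NSEC_PER_SEC / 32768), i.e. about 30518 ns
 * per cycle.
 */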
/**
* omap_init_clocksource_32k - setup and register counter 32k as a
* kernel clocksource
* @vbase: ioremapped base address of the counter_32k module
*
* Returns 0 upon success or negative error code upon failure.
*
*/
static int __init omap_init_clocksource_32k(void __iomem *vbase)
{
int ret;
/*
* 32k sync Counter IP register offsets vary between the
* highlander version and the legacy ones.
* The 'SCHEME' bits (30-31) of the revision register are used
* to identify the version.
*/
if (readl_relaxed(vbase + OMAP2_32KSYNCNT_REV_OFF) &
OMAP2_32KSYNCNT_REV_SCHEME)
sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF_HIGH;
else
sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF_LOW;
/*
* 120000 is a rough estimate from the calculations in
* __clocksource_update_freq_scale.
*/
clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
32768, NSEC_PER_SEC, 120000);
ret = clocksource_mmio_init(sync32k_cnt_reg, "32k_counter", 32768,
250, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
return ret;
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
register_persistent_clock(omap_read_persistent_clock64);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
}
/*
* ---------------------------------------------------------------------------
* Timer initialization
* ---------------------------------------------------------------------------
*/
int __init omap_32k_timer_init(void)
{
int ret = -ENODEV;
if (cpu_is_omap16xx()) {
void __iomem *base;
struct clk *sync32k_ick;
base = ioremap(OMAP1_32KSYNC_TIMER_BASE, SZ_1K);
if (!base) {
pr_err("32k_counter: failed to map base addr\n");
return -ENODEV;
}
sync32k_ick = clk_get(NULL, "omap_32ksync_ick");
if (!IS_ERR(sync32k_ick))
clk_prepare_enable(sync32k_ick);
ret = omap_init_clocksource_32k(base);
}
if (!ret)
omap_init_32k_timer();
return ret;
}
| linux-master | arch/arm/mach-omap1/timer32k.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/arch/arm/mach-omap1/devices.c
*
* OMAP1 platform device setup/initialization
*/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/omap-wd-timer.h>
#include <linux/soc/ti/omap1-io.h>
#include <asm/mach/map.h>
#include "tc.h"
#include "mux.h"
#include "hardware.h"
#include "common.h"
#include "clock.h"
#include "mmc.h"
#include "sram.h"
#if IS_ENABLED(CONFIG_RTC_DRV_OMAP)
#define OMAP_RTC_BASE 0xfffb4800
static struct resource rtc_resources[] = {
{
.start = OMAP_RTC_BASE,
.end = OMAP_RTC_BASE + 0x5f,
.flags = IORESOURCE_MEM,
},
{
.start = INT_RTC_TIMER,
.flags = IORESOURCE_IRQ,
},
{
.start = INT_RTC_ALARM,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device omap_rtc_device = {
.name = "omap_rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static void omap_init_rtc(void)
{
(void) platform_device_register(&omap_rtc_device);
}
#else
static inline void omap_init_rtc(void) {}
#endif
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_MMC_OMAP)
static inline void omap1_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
int controller_nr)
{
if (controller_nr == 0) {
omap_cfg_reg(MMC_CMD);
omap_cfg_reg(MMC_CLK);
omap_cfg_reg(MMC_DAT0);
if (cpu_is_omap1710()) {
omap_cfg_reg(M15_1710_MMC_CLKI);
omap_cfg_reg(P19_1710_MMC_CMDDIR);
omap_cfg_reg(P20_1710_MMC_DATDIR0);
}
if (mmc_controller->slots[0].wires == 4) {
omap_cfg_reg(MMC_DAT1);
/* NOTE: DAT2 can be on W10 (here) or M15 */
if (!mmc_controller->slots[0].nomux)
omap_cfg_reg(MMC_DAT2);
omap_cfg_reg(MMC_DAT3);
}
}
/* Block 2 is on newer chips, and has many pinout options */
if (cpu_is_omap16xx() && controller_nr == 1) {
if (!mmc_controller->slots[1].nomux) {
omap_cfg_reg(Y8_1610_MMC2_CMD);
omap_cfg_reg(Y10_1610_MMC2_CLK);
omap_cfg_reg(R18_1610_MMC2_CLKIN);
omap_cfg_reg(W8_1610_MMC2_DAT0);
if (mmc_controller->slots[1].wires == 4) {
omap_cfg_reg(V8_1610_MMC2_DAT1);
omap_cfg_reg(W15_1610_MMC2_DAT2);
omap_cfg_reg(R10_1610_MMC2_DAT3);
}
/* These are needed for the level shifter */
omap_cfg_reg(V9_1610_MMC2_CMDDIR);
omap_cfg_reg(V5_1610_MMC2_DATDIR0);
omap_cfg_reg(W19_1610_MMC2_DATDIR1);
}
/* Feedback clock must be set on OMAP-1710 MMC2 */
if (cpu_is_omap1710())
omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24),
MOD_CONF_CTRL_1);
}
}
#define OMAP_MMC_NR_RES 4
/*
* Register MMC devices.
*/
static int __init omap_mmc_add(const char *name, int id, unsigned long base,
unsigned long size, unsigned int irq,
unsigned rx_req, unsigned tx_req,
struct omap_mmc_platform_data *data)
{
struct platform_device *pdev;
struct resource res[OMAP_MMC_NR_RES];
int ret;
pdev = platform_device_alloc(name, id);
if (!pdev)
return -ENOMEM;
memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
res[0].start = base;
res[0].end = base + size - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = res[1].end = irq;
res[1].flags = IORESOURCE_IRQ;
res[2].start = rx_req;
res[2].name = "rx";
res[2].flags = IORESOURCE_DMA;
res[3].start = tx_req;
res[3].name = "tx";
res[3].flags = IORESOURCE_DMA;
if (cpu_is_omap15xx())
data->slots[0].features = MMC_OMAP15XX;
if (cpu_is_omap16xx())
data->slots[0].features = MMC_OMAP16XX;
ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
if (ret == 0)
ret = platform_device_add_data(pdev, data, sizeof(*data));
if (ret)
goto fail;
ret = platform_device_add(pdev);
if (ret)
goto fail;
/* return device handle to board setup code */
data->dev = &pdev->dev;
return 0;
fail:
platform_device_put(pdev);
return ret;
}
void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers)
{
int i;
for (i = 0; i < nr_controllers; i++) {
unsigned long base, size;
unsigned rx_req, tx_req;
unsigned int irq = 0;
if (!mmc_data[i])
continue;
omap1_mmc_mux(mmc_data[i], i);
switch (i) {
case 0:
base = OMAP1_MMC1_BASE;
irq = INT_MMC;
rx_req = 22;
tx_req = 21;
break;
case 1:
if (!cpu_is_omap16xx())
return;
base = OMAP1_MMC2_BASE;
irq = INT_1610_MMC2;
rx_req = 55;
tx_req = 54;
break;
default:
continue;
}
size = OMAP1_MMC_SIZE;
omap_mmc_add("mmci-omap", i, base, size, irq,
rx_req, tx_req, mmc_data[i]);
}
}
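/*
 * Illustrative board-code usage (a sketch, not copied from any in-tree
 * board file; field names other than .slots[0].wires are assumptions
 * based on the omap_mmc_platform_data definition in mmc.h):
 *
 *	static struct omap_mmc_platform_data board_mmc1_data = {
 *		.nr_slots	= 1,
 *		.slots[0].wires	= 4,
 *	};
 *	static struct omap_mmc_platform_data *board_mmc_data[] = {
 *		&board_mmc1_data,	(controller 0 only)
 *	};
 *	omap1_init_mmc(board_mmc_data, ARRAY_SIZE(board_mmc_data));
 */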
#endif
/*-------------------------------------------------------------------------*/
/* Numbering for the SPI-capable controllers when used for SPI:
* spi = 1
* uwire = 2
* mmc1..2 = 3..4
* mcbsp1..3 = 5..7
*/
#if IS_ENABLED(CONFIG_SPI_OMAP_UWIRE)
#define OMAP_UWIRE_BASE 0xfffb3000
static struct resource uwire_resources[] = {
{
.start = OMAP_UWIRE_BASE,
.end = OMAP_UWIRE_BASE + 0x20,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap_uwire_device = {
.name = "omap_uwire",
.id = -1,
.num_resources = ARRAY_SIZE(uwire_resources),
.resource = uwire_resources,
};
static void omap_init_uwire(void)
{
/* FIXME define and use a boot tag; not all boards will be hooking
* up devices to the microwire controller, and multi-board configs
* mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
*/
/* board-specific code must configure chipselects (only a few
* are normally used) and SCLK/SDI/SDO (each has two choices).
*/
(void) platform_device_register(&omap_uwire_device);
}
#else
static inline void omap_init_uwire(void) {}
#endif
#define OMAP1_RNG_BASE 0xfffe5000
static struct resource omap1_rng_resources[] = {
{
.start = OMAP1_RNG_BASE,
.end = OMAP1_RNG_BASE + 0x4f,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap1_rng_device = {
.name = "omap_rng",
.id = -1,
.num_resources = ARRAY_SIZE(omap1_rng_resources),
.resource = omap1_rng_resources,
};
static void omap1_init_rng(void)
{
if (!cpu_is_omap16xx())
return;
(void) platform_device_register(&omap1_rng_device);
}
/*-------------------------------------------------------------------------*/
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for a few like USB):
*
* (a) Does any "standard config" pin muxing needed. Board-specific
* code will have muxed GPIO pins and done "nonstandard" setup;
* that code could live in the boot loader.
* (b) Populating board-specific platform_data with the data drivers
* rely on to handle wiring variations.
* (c) Creating platform devices as meaningful on this board and
* with this kernel configuration.
*
* Claiming GPIOs, and setting their direction and initial values, is the
* responsibility of the device drivers. So is responding to probe().
*
* Board-specific knowledge like creating devices or pin setup is to be
* kept out of drivers as much as possible. In particular, pin setup
* may be handled by the boot loader, and drivers should expect it will
* normally have been done by the time they're probed.
*/
static int __init omap1_init_devices(void)
{
if (!cpu_class_is_omap1())
return -ENODEV;
omap1_sram_init();
omap1_clk_late_init();
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
omap_init_rtc();
omap_init_uwire();
omap1_init_rng();
return 0;
}
arch_initcall(omap1_init_devices);
#if IS_ENABLED(CONFIG_OMAP_WATCHDOG)
static struct resource wdt_resources[] = {
{
.start = 0xfffeb000,
.end = 0xfffeb07F,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap_wdt_device = {
.name = "omap_wdt",
.id = -1,
.num_resources = ARRAY_SIZE(wdt_resources),
.resource = wdt_resources,
};
static int __init omap_init_wdt(void)
{
struct omap_wd_timer_platform_data pdata;
int ret;
if (!cpu_is_omap16xx())
return -ENODEV;
pdata.read_reset_sources = omap1_get_reset_sources;
ret = platform_device_register(&omap_wdt_device);
if (!ret) {
ret = platform_device_add_data(&omap_wdt_device, &pdata,
sizeof(pdata));
if (ret)
platform_device_del(&omap_wdt_device);
}
return ret;
}
subsys_initcall(omap_init_wdt);
#endif
| linux-master | arch/arm/mach-omap1/devices.c |
/*
* linux/arch/arm/mach-omap1/time.c
*
* OMAP Timers
*
* Copyright (C) 2004 Nokia Corporation
* Partial timer rewrite and additional dynamic tick timer support by
* Tony Lindgren <[email protected]> and
* Tuukka Tikkanen <[email protected]>
*
* MPU timer code based on the older MPU timer code for OMAP
* Copyright (C) 2000 RidgeRun, Inc.
* Author: Greg Lonnon <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/sched_clock.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include "hardware.h"
#include "mux.h"
#include "iomap.h"
#include "common.h"
#include "clock.h"
#ifdef CONFIG_OMAP_MPU_TIMER
#define OMAP_MPU_TIMER_BASE OMAP_MPU_TIMER1_BASE
#define OMAP_MPU_TIMER_OFFSET 0x100
typedef struct {
u32 cntl; /* CNTL_TIMER, R/W */
u32 load_tim; /* LOAD_TIM, W */
u32 read_tim; /* READ_TIM, R */
} omap_mpu_timer_regs_t;
#define omap_mpu_timer_base(n) \
((omap_mpu_timer_regs_t __iomem *)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE + \
(n)*OMAP_MPU_TIMER_OFFSET))
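/*
 * Example (derived from the macro above): omap_mpu_timer_base(1)
 * yields the register block of the second MPU timer at
 * OMAP1_IO_ADDRESS(OMAP_MPU_TIMER1_BASE + 0x100), since successive
 * timers are laid out 0x100 bytes apart.
 */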
static inline unsigned long notrace omap_mpu_timer_read(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
return readl(&timer->read_tim);
}
static inline void omap_mpu_set_autoreset(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
writel(readl(&timer->cntl) | MPU_TIMER_AR, &timer->cntl);
}
static inline void omap_mpu_remove_autoreset(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
writel(readl(&timer->cntl) & ~MPU_TIMER_AR, &timer->cntl);
}
static inline void omap_mpu_timer_start(int nr, unsigned long load_val,
int autoreset)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
unsigned int timerflags = MPU_TIMER_CLOCK_ENABLE | MPU_TIMER_ST;
if (autoreset)
timerflags |= MPU_TIMER_AR;
writel(MPU_TIMER_CLOCK_ENABLE, &timer->cntl);
udelay(1);
writel(load_val, &timer->load_tim);
udelay(1);
writel(timerflags, &timer->cntl);
}
static inline void omap_mpu_timer_stop(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
writel(readl(&timer->cntl) & ~MPU_TIMER_ST, &timer->cntl);
}
/*
* ---------------------------------------------------------------------------
* MPU timer 1 ... count down to zero, interrupt, reload
* ---------------------------------------------------------------------------
*/
static int omap_mpu_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
omap_mpu_timer_start(0, cycles, 0);
return 0;
}
static int omap_mpu_set_oneshot(struct clock_event_device *evt)
{
omap_mpu_timer_stop(0);
omap_mpu_remove_autoreset(0);
return 0;
}
static int omap_mpu_set_periodic(struct clock_event_device *evt)
{
omap_mpu_set_autoreset(0);
return 0;
}
static struct clock_event_device clockevent_mpu_timer1 = {
.name = "mpu_timer1",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = omap_mpu_set_next_event,
.set_state_periodic = omap_mpu_set_periodic,
.set_state_oneshot = omap_mpu_set_oneshot,
};
static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &clockevent_mpu_timer1;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static __init void omap_init_mpu_timer(unsigned long rate)
{
if (request_irq(INT_TIMER1, omap_mpu_timer1_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "mpu_timer1", NULL))
pr_err("Failed to request irq %d (mpu_timer1)\n", INT_TIMER1);
omap_mpu_timer_start(0, (rate / HZ) - 1, 1);
clockevent_mpu_timer1.cpumask = cpumask_of(0);
clockevents_config_and_register(&clockevent_mpu_timer1, rate,
1, -1);
}
/*
* ---------------------------------------------------------------------------
* MPU timer 2 ... free running 32-bit clock source and scheduler clock
* ---------------------------------------------------------------------------
*/
static u64 notrace omap_mpu_read_sched_clock(void)
{
return ~omap_mpu_timer_read(1);
}
static void __init omap_init_clocksource(unsigned long rate)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(1);
static char err[] __initdata = KERN_ERR
"%s: can't register clocksource!\n";
omap_mpu_timer_start(1, ~0, 1);
sched_clock_register(omap_mpu_read_sched_clock, 32, rate);
if (clocksource_mmio_init(&timer->read_tim, "mpu_timer2", rate,
300, 32, clocksource_mmio_readl_down))
printk(err, "mpu_timer2");
}
static void __init omap_mpu_timer_init(void)
{
struct clk *ck_ref = clk_get(NULL, "ck_ref");
unsigned long rate;
BUG_ON(IS_ERR(ck_ref));
rate = clk_get_rate(ck_ref);
clk_put(ck_ref);
/* PTV = 0: the effective timer rate is ck_ref divided by 2 */
rate /= 2;
omap_init_mpu_timer(rate);
omap_init_clocksource(rate);
}
#else
static inline void omap_mpu_timer_init(void)
{
pr_err("Bogus timer, should not happen\n");
}
#endif /* CONFIG_OMAP_MPU_TIMER */
/*
* ---------------------------------------------------------------------------
* Timer initialization
* ---------------------------------------------------------------------------
*/
void __init omap1_timer_init(void)
{
omap1_clk_init();
omap1_mux_init();
if (omap_32k_timer_init() != 0)
omap_mpu_timer_init();
}
| linux-master | arch/arm/mach-omap1/time.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Runtime PM support code for OMAP1
*
* Author: Kevin Hilman, Deep Root Systems, LLC
*
* Copyright (C) 2010 Texas Instruments, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/err.h>
#include "soc.h"
static struct dev_pm_domain default_pm_domain = {
.ops = {
USE_PM_CLK_RUNTIME_OPS
USE_PLATFORM_PM_SLEEP_OPS
},
};
static struct pm_clk_notifier_block platform_bus_notifier = {
.pm_domain = &default_pm_domain,
.con_ids = { "ick", "fck", NULL, },
};
static int __init omap1_pm_runtime_init(void)
{
if (!cpu_class_is_omap1())
return -ENODEV;
pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
core_initcall(omap1_pm_runtime_init);
| linux-master | arch/arm/mach-omap1/pm_bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/board-ams-delta.c
*
* Modified from board-generic.c
*
* Board specific inits for the Amstrad E3 (codename Delta) videophone
*
* Copyright (C) 2006 Jonathan McDowell <[email protected]>
*/
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/leds.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/serial_8250.h>
#include <linux/export.h>
#include <linux/omapfb.h>
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/soc/ti/omap1-mux.h>
#include <asm/serial.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <linux/platform_data/keypad-omap.h>
#include "hardware.h"
#include "usb.h"
#include "ams-delta-fiq.h"
#include "board-ams-delta.h"
#include "iomap.h"
#include "common.h"
static const unsigned int ams_delta_keymap[] = {
KEY(0, 0, KEY_F1), /* Advert */
KEY(0, 3, KEY_COFFEE), /* Games */
KEY(0, 2, KEY_QUESTION), /* Directory */
KEY(2, 3, KEY_CONNECT), /* Internet */
KEY(1, 2, KEY_SHOP), /* Services */
KEY(1, 1, KEY_PHONE), /* VoiceMail */
KEY(0, 1, KEY_DELETE), /* Delete */
KEY(2, 2, KEY_PLAY), /* Play */
KEY(1, 0, KEY_PAGEUP), /* Up */
KEY(1, 3, KEY_PAGEDOWN), /* Down */
KEY(2, 0, KEY_EMAIL), /* ReadEmail */
KEY(2, 1, KEY_STOP), /* Stop */
/* Numeric keypad portion */
KEY(0, 7, KEY_KP1),
KEY(0, 6, KEY_KP2),
KEY(0, 5, KEY_KP3),
KEY(1, 7, KEY_KP4),
KEY(1, 6, KEY_KP5),
KEY(1, 5, KEY_KP6),
KEY(2, 7, KEY_KP7),
KEY(2, 6, KEY_KP8),
KEY(2, 5, KEY_KP9),
KEY(3, 6, KEY_KP0),
KEY(3, 7, KEY_KPASTERISK),
KEY(3, 5, KEY_KPDOT), /* # key */
KEY(7, 2, KEY_NUMLOCK), /* Mute */
KEY(7, 1, KEY_KPMINUS), /* Recall */
KEY(6, 1, KEY_KPPLUS), /* Redial */
KEY(7, 6, KEY_KPSLASH), /* Handsfree */
KEY(6, 0, KEY_ENTER), /* Video */
KEY(7, 4, KEY_CAMERA), /* Photo */
KEY(0, 4, KEY_F2), /* Home */
KEY(1, 4, KEY_F3), /* Office */
KEY(2, 4, KEY_F4), /* Mobile */
KEY(7, 7, KEY_F5), /* SMS */
KEY(7, 5, KEY_F6), /* Email */
/* QWERTY portion of keypad */
KEY(3, 4, KEY_Q),
KEY(3, 3, KEY_W),
KEY(3, 2, KEY_E),
KEY(3, 1, KEY_R),
KEY(3, 0, KEY_T),
KEY(4, 7, KEY_Y),
KEY(4, 6, KEY_U),
KEY(4, 5, KEY_I),
KEY(4, 4, KEY_O),
KEY(4, 3, KEY_P),
KEY(4, 2, KEY_A),
KEY(4, 1, KEY_S),
KEY(4, 0, KEY_D),
KEY(5, 7, KEY_F),
KEY(5, 6, KEY_G),
KEY(5, 5, KEY_H),
KEY(5, 4, KEY_J),
KEY(5, 3, KEY_K),
KEY(5, 2, KEY_L),
KEY(5, 1, KEY_Z),
KEY(5, 0, KEY_X),
KEY(6, 7, KEY_C),
KEY(6, 6, KEY_V),
KEY(6, 5, KEY_B),
KEY(6, 4, KEY_N),
KEY(6, 3, KEY_M),
KEY(6, 2, KEY_SPACE),
KEY(7, 0, KEY_LEFTSHIFT), /* Vol up */
KEY(7, 3, KEY_LEFTCTRL), /* Vol down */
};
#define LATCH1_PHYS 0x01000000
#define LATCH1_VIRT 0xEA000000
#define MODEM_PHYS 0x04000000
#define MODEM_VIRT 0xEB000000
#define LATCH2_PHYS 0x08000000
#define LATCH2_VIRT 0xEC000000
static struct map_desc ams_delta_io_desc[] __initdata = {
/* AMS_DELTA_LATCH1 */
{
.virtual = LATCH1_VIRT,
.pfn = __phys_to_pfn(LATCH1_PHYS),
.length = 0x01000000,
.type = MT_DEVICE
},
/* AMS_DELTA_LATCH2 */
{
.virtual = LATCH2_VIRT,
.pfn = __phys_to_pfn(LATCH2_PHYS),
.length = 0x01000000,
.type = MT_DEVICE
},
/* AMS_DELTA_MODEM */
{
.virtual = MODEM_VIRT,
.pfn = __phys_to_pfn(MODEM_PHYS),
.length = 0x01000000,
.type = MT_DEVICE
}
};
static const struct omap_lcd_config ams_delta_lcd_config __initconst = {
.ctrl_name = "internal",
};
static struct omap_usb_config ams_delta_usb_config __initdata = {
.register_host = 1,
.hmc_mode = 16,
.pins[0] = 2,
};
#define LATCH1_NGPIO 8
static struct resource latch1_resources[] = {
[0] = {
.name = "dat",
.start = LATCH1_PHYS,
.end = LATCH1_PHYS + (LATCH1_NGPIO - 1) / 8,
.flags = IORESOURCE_MEM,
},
};
#define LATCH1_LABEL "latch1"
static struct bgpio_pdata latch1_pdata = {
.label = LATCH1_LABEL,
.base = -1,
.ngpio = LATCH1_NGPIO,
};
static struct platform_device latch1_gpio_device = {
.name = "basic-mmio-gpio",
.id = 0,
.resource = latch1_resources,
.num_resources = ARRAY_SIZE(latch1_resources),
.dev = {
.platform_data = &latch1_pdata,
},
};
#define LATCH1_PIN_LED_CAMERA 0
#define LATCH1_PIN_LED_ADVERT 1
#define LATCH1_PIN_LED_MAIL 2
#define LATCH1_PIN_LED_HANDSFREE 3
#define LATCH1_PIN_LED_VOICEMAIL 4
#define LATCH1_PIN_LED_VOICE 5
#define LATCH1_PIN_DOCKIT1 6
#define LATCH1_PIN_DOCKIT2 7
#define LATCH2_NGPIO 16
static struct resource latch2_resources[] = {
[0] = {
.name = "dat",
.start = LATCH2_PHYS,
.end = LATCH2_PHYS + (LATCH2_NGPIO - 1) / 8,
.flags = IORESOURCE_MEM,
},
};
#define LATCH2_LABEL "latch2"
static struct bgpio_pdata latch2_pdata = {
.label = LATCH2_LABEL,
.base = -1,
.ngpio = LATCH2_NGPIO,
};
static struct platform_device latch2_gpio_device = {
.name = "basic-mmio-gpio",
.id = 1,
.resource = latch2_resources,
.num_resources = ARRAY_SIZE(latch2_resources),
.dev = {
.platform_data = &latch2_pdata,
},
};
#define LATCH2_PIN_LCD_VBLEN 0
#define LATCH2_PIN_LCD_NDISP 1
#define LATCH2_PIN_NAND_NCE 2
#define LATCH2_PIN_NAND_NRE 3
#define LATCH2_PIN_NAND_NWP 4
#define LATCH2_PIN_NAND_NWE 5
#define LATCH2_PIN_NAND_ALE 6
#define LATCH2_PIN_NAND_CLE 7
#define LATCH2_PIN_KEYBRD_PWR 8
#define LATCH2_PIN_KEYBRD_DATAOUT 9
#define LATCH2_PIN_SCARD_RSTIN 10
#define LATCH2_PIN_SCARD_CMDVCC 11
#define LATCH2_PIN_MODEM_NRESET 12
#define LATCH2_PIN_MODEM_CODEC 13
#define LATCH2_PIN_HANDSFREE_MUTE 14
#define LATCH2_PIN_HANDSET_MUTE 15
static struct regulator_consumer_supply modem_nreset_consumers[] = {
REGULATOR_SUPPLY("RESET#", "serial8250.1"),
REGULATOR_SUPPLY("POR", "cx20442-codec"),
};
static struct regulator_init_data modem_nreset_data = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.boot_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(modem_nreset_consumers),
.consumer_supplies = modem_nreset_consumers,
};
static struct fixed_voltage_config modem_nreset_config = {
.supply_name = "modem_nreset",
.microvolts = 3300000,
.startup_delay = 25000,
.enabled_at_boot = 1,
.init_data = &modem_nreset_data,
};
static struct platform_device modem_nreset_device = {
.name = "reg-fixed-voltage",
.id = -1,
.dev = {
.platform_data = &modem_nreset_config,
},
};
static struct gpiod_lookup_table ams_delta_nreset_gpiod_table = {
.dev_id = "reg-fixed-voltage",
.table = {
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_MODEM_NRESET,
NULL, GPIO_ACTIVE_HIGH),
{ },
},
};
struct modem_private_data {
struct regulator *regulator;
};
static struct modem_private_data modem_priv;
/*
* Define partitions for flash device
*/
static struct mtd_partition partition_info[] = {
{ .name = "Kernel",
.offset = 0,
.size = 3 * SZ_1M + SZ_512K },
{ .name = "u-boot",
.offset = 3 * SZ_1M + SZ_512K,
.size = SZ_256K },
{ .name = "u-boot params",
.offset = 3 * SZ_1M + SZ_512K + SZ_256K,
.size = SZ_256K },
{ .name = "Amstrad LDR",
.offset = 4 * SZ_1M,
.size = SZ_256K },
{ .name = "File system",
.offset = 4 * SZ_1M + 1 * SZ_256K,
.size = 27 * SZ_1M },
{ .name = "PBL reserved",
.offset = 32 * SZ_1M - 3 * SZ_256K,
.size = 3 * SZ_256K },
};
static struct gpio_nand_platdata nand_platdata = {
.parts = partition_info,
.num_parts = ARRAY_SIZE(partition_info),
};
static struct platform_device ams_delta_nand_device = {
.name = "ams-delta-nand",
.id = -1,
.dev = {
.platform_data = &nand_platdata,
},
};
#define OMAP_GPIO_LABEL "gpio-0-15"
#define OMAP_MPUIO_LABEL "mpuio"
static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
.table = {
GPIO_LOOKUP(OMAP_GPIO_LABEL, AMS_DELTA_GPIO_PIN_NAND_RB, "rdy",
0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NCE, "nce",
GPIO_ACTIVE_LOW),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NRE, "nre",
GPIO_ACTIVE_LOW),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWP, "nwp",
GPIO_ACTIVE_LOW),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe",
GPIO_ACTIVE_LOW),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_ALE, "ale", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_CLE, "cle", 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 0, "data", 0, 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 1, "data", 1, 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 2, "data", 2, 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 3, "data", 3, 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 4, "data", 4, 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 5, "data", 5, 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 6, "data", 6, 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 7, "data", 7, 0),
{ },
},
};
static struct resource ams_delta_kp_resources[] = {
[0] = {
.start = INT_KEYBOARD,
.end = INT_KEYBOARD,
.flags = IORESOURCE_IRQ,
},
};
static const struct matrix_keymap_data ams_delta_keymap_data = {
.keymap = ams_delta_keymap,
.keymap_size = ARRAY_SIZE(ams_delta_keymap),
};
static struct omap_kp_platform_data ams_delta_kp_data = {
.rows = 8,
.cols = 8,
.keymap_data = &ams_delta_keymap_data,
.delay = 9,
};
static struct platform_device ams_delta_kp_device = {
.name = "omap-keypad",
.id = -1,
.dev = {
.platform_data = &ams_delta_kp_data,
},
.num_resources = ARRAY_SIZE(ams_delta_kp_resources),
.resource = ams_delta_kp_resources,
};
static struct platform_device ams_delta_lcd_device = {
.name = "lcd_ams_delta",
.id = -1,
};
static struct gpiod_lookup_table ams_delta_lcd_gpio_table = {
.table = {
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_LCD_VBLEN, "vblen", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_LCD_NDISP, "ndisp", 0),
{ },
},
};
static struct gpio_led gpio_leds[] __initdata = {
[LATCH1_PIN_LED_CAMERA] = {
.name = "camera",
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
[LATCH1_PIN_LED_ADVERT] = {
.name = "advert",
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
[LATCH1_PIN_LED_MAIL] = {
.name = "email",
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
[LATCH1_PIN_LED_HANDSFREE] = {
.name = "handsfree",
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
[LATCH1_PIN_LED_VOICEMAIL] = {
.name = "voicemail",
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
[LATCH1_PIN_LED_VOICE] = {
.name = "voice",
.default_state = LEDS_GPIO_DEFSTATE_OFF,
},
};
static const struct gpio_led_platform_data leds_pdata __initconst = {
.leds = gpio_leds,
.num_leds = ARRAY_SIZE(gpio_leds),
};
static struct gpiod_lookup_table leds_gpio_table = {
.table = {
GPIO_LOOKUP_IDX(LATCH1_LABEL, LATCH1_PIN_LED_CAMERA, NULL,
LATCH1_PIN_LED_CAMERA, 0),
GPIO_LOOKUP_IDX(LATCH1_LABEL, LATCH1_PIN_LED_ADVERT, NULL,
LATCH1_PIN_LED_ADVERT, 0),
GPIO_LOOKUP_IDX(LATCH1_LABEL, LATCH1_PIN_LED_MAIL, NULL,
LATCH1_PIN_LED_MAIL, 0),
GPIO_LOOKUP_IDX(LATCH1_LABEL, LATCH1_PIN_LED_HANDSFREE, NULL,
LATCH1_PIN_LED_HANDSFREE, 0),
GPIO_LOOKUP_IDX(LATCH1_LABEL, LATCH1_PIN_LED_VOICEMAIL, NULL,
LATCH1_PIN_LED_VOICEMAIL, 0),
GPIO_LOOKUP_IDX(LATCH1_LABEL, LATCH1_PIN_LED_VOICE, NULL,
LATCH1_PIN_LED_VOICE, 0),
{ },
},
};
static struct platform_device ams_delta_audio_device = {
.name = "ams-delta-audio",
.id = -1,
};
static struct gpiod_lookup_table ams_delta_audio_gpio_table = {
.table = {
GPIO_LOOKUP(OMAP_GPIO_LABEL, AMS_DELTA_GPIO_PIN_HOOK_SWITCH,
"hook_switch", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_MODEM_CODEC,
"modem_codec", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_HANDSFREE_MUTE,
"handsfree_mute", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_HANDSET_MUTE,
"handset_mute", 0),
{ },
},
};
static struct platform_device cx20442_codec_device = {
.name = "cx20442-codec",
.id = -1,
};
static struct resource ams_delta_serio_resources[] = {
{
.flags = IORESOURCE_IRQ,
/*
* Initialize the IRQ resource with an invalid IRQ number.
* It will be replaced with the dynamically allocated GPIO IRQ
* obtained from the GPIO chip as soon as the chip is available.
*/
.start = -EINVAL,
.end = -EINVAL,
},
};
static struct platform_device ams_delta_serio_device = {
.name = "ams-delta-serio",
.id = PLATFORM_DEVID_NONE,
.dev = {
/*
* Initialize .platform_data explicitly to NULL to mark it as
* meaningful; it will be replaced with the FIQ buffer address
* as soon as the FIQ is initialized.
*/
.platform_data = NULL,
},
.num_resources = ARRAY_SIZE(ams_delta_serio_resources),
.resource = ams_delta_serio_resources,
};
static struct regulator_consumer_supply keybrd_pwr_consumers[] = {
/*
* Initialize supply .dev_name with NULL. It will be replaced
* with serio dev_name() as soon as the serio device is registered.
*/
REGULATOR_SUPPLY("vcc", NULL),
};
static struct regulator_init_data keybrd_pwr_initdata = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(keybrd_pwr_consumers),
.consumer_supplies = keybrd_pwr_consumers,
};
static struct fixed_voltage_config keybrd_pwr_config = {
.supply_name = "keybrd_pwr",
.microvolts = 5000000,
.init_data = &keybrd_pwr_initdata,
};
static struct platform_device keybrd_pwr_device = {
.name = "reg-fixed-voltage",
.id = PLATFORM_DEVID_AUTO,
.dev = {
.platform_data = &keybrd_pwr_config,
},
};
static struct gpiod_lookup_table keybrd_pwr_gpio_table = {
.table = {
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_KEYBRD_PWR, NULL,
GPIO_ACTIVE_HIGH),
{ },
},
};
static struct platform_device *ams_delta_devices[] __initdata = {
&latch1_gpio_device,
&latch2_gpio_device,
&ams_delta_kp_device,
&ams_delta_audio_device,
&ams_delta_serio_device,
&ams_delta_nand_device,
&ams_delta_lcd_device,
&cx20442_codec_device,
};
static struct gpiod_lookup_table *ams_delta_gpio_tables[] __initdata = {
&ams_delta_nreset_gpiod_table,
&ams_delta_audio_gpio_table,
&keybrd_pwr_gpio_table,
&ams_delta_lcd_gpio_table,
&ams_delta_nand_gpio_table,
};
/*
* Some drivers may not use GPIO lookup tables but need to be provided
* with GPIO numbers. The same applies to GPIO based IRQ lines - some
* drivers may not even use the GPIO layer but expect just IRQ numbers.
* We could either define GPIO lookup tables and use them on behalf
* of those devices, or use GPIO driver level methods for
* identification of GPIO and IRQ numbers. For the purpose of the latter,
* define a helper function which identifies GPIO chips by their labels.
*/
static int gpiochip_match_by_label(struct gpio_chip *chip, void *data)
{
char *label = data;
return !strcmp(label, chip->label);
}
static struct gpiod_hog ams_delta_gpio_hogs[] = {
GPIO_HOG(LATCH2_LABEL, LATCH2_PIN_KEYBRD_DATAOUT, "keybrd_dataout",
GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW),
{},
};
static struct plat_serial8250_port ams_delta_modem_ports[];
/*
* Obtain MODEM IRQ GPIO descriptor using its hardware pin
* number and assign related IRQ number to the MODEM port.
* Keep the GPIO descriptor open so nobody steps in.
*/
static void __init modem_assign_irq(struct gpio_chip *chip)
{
struct gpio_desc *gpiod;
gpiod = gpiochip_request_own_desc(chip, AMS_DELTA_GPIO_PIN_MODEM_IRQ,
"modem_irq", GPIO_ACTIVE_HIGH,
GPIOD_IN);
if (IS_ERR(gpiod)) {
pr_err("%s: modem IRQ GPIO request failed (%ld)\n", __func__,
PTR_ERR(gpiod));
} else {
ams_delta_modem_ports[0].irq = gpiod_to_irq(gpiod);
}
}
/*
* Take care of proper initialization of devices and data structures
* which depend on GPIO lines provided by OMAP GPIO banks but whose
* drivers use neither GPIO lookup tables nor the GPIO layer at all.
* The function may be called as soon as OMAP GPIO devices are probed.
* Since that happens at postcore_initcall, it can be called successfully
* from init_machine or later.
* Dependent devices may be registered from within this function or later.
*/
static void __init omap_gpio_deps_init(void)
{
struct gpio_chip *chip;
chip = gpiochip_find(OMAP_GPIO_LABEL, gpiochip_match_by_label);
if (!chip) {
pr_err("%s: OMAP GPIO chip not found\n", __func__);
return;
}
/*
* Start with FIQ initialization as it may have to request
* and release successfully each OMAP GPIO pin in turn.
*/
ams_delta_init_fiq(chip, &ams_delta_serio_device);
modem_assign_irq(chip);
}
/*
* Initialize latch2 pins with values which are safe for dependent on-board
* devices or useful for their successful initialization even before the
* GPIO driver takes control of the latch pins:
* - LATCH2_PIN_LCD_VBLEN = 0
* - LATCH2_PIN_LCD_NDISP = 0 Keep LCD device powered off before its
* driver takes control over it.
* - LATCH2_PIN_NAND_NCE = 0
* - LATCH2_PIN_NAND_NWP = 0 Keep NAND device down and write-
* protected before its driver takes
* control over it.
* - LATCH2_PIN_KEYBRD_PWR = 0 Keep keyboard powered off before serio
* driver takes control over it.
* - LATCH2_PIN_KEYBRD_DATAOUT = 0 Keep low to avoid corruption of first
* byte of data received from attached
* keyboard when serio device is probed;
* the pin is also hogged low by the latch2
* GPIO driver as soon as it is ready.
* - LATCH2_PIN_MODEM_NRESET = 1 Enable voice MODEM device, allowing for
* its successful probe even before a
* regulator it depends on, which in turn
* takes control over the pin, is set up.
* - LATCH2_PIN_MODEM_CODEC = 1 Attach voice MODEM CODEC data port
* to the MODEM so the CODEC is under
* control even if audio driver doesn't
* take it over.
*/
static void __init ams_delta_latch2_init(void)
{
u16 latch2 = 1 << LATCH2_PIN_MODEM_NRESET | 1 << LATCH2_PIN_MODEM_CODEC;
__raw_writew(latch2, IOMEM(LATCH2_VIRT));
}
static void __init ams_delta_init(void)
{
struct platform_device *leds_pdev;
/* mux pins for uarts */
omap_cfg_reg(UART1_TX);
omap_cfg_reg(UART1_RTS);
/* parallel camera interface */
omap_cfg_reg(H19_1610_CAM_EXCLK);
omap_cfg_reg(J15_1610_CAM_LCLK);
omap_cfg_reg(L18_1610_CAM_VS);
omap_cfg_reg(L15_1610_CAM_HS);
omap_cfg_reg(L19_1610_CAM_D0);
omap_cfg_reg(K14_1610_CAM_D1);
omap_cfg_reg(K15_1610_CAM_D2);
omap_cfg_reg(K19_1610_CAM_D3);
omap_cfg_reg(K18_1610_CAM_D4);
omap_cfg_reg(J14_1610_CAM_D5);
omap_cfg_reg(J19_1610_CAM_D6);
omap_cfg_reg(J18_1610_CAM_D7);
omap_gpio_deps_init();
ams_delta_latch2_init();
gpiod_add_hogs(ams_delta_gpio_hogs);
omap_serial_init();
omap_register_i2c_bus(1, 100, NULL, 0);
omap1_usb_init(&ams_delta_usb_config);
platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
/*
* As soon as regulator consumers have been registered, assign their
* dev_names to consumer supply entries of respective regulators.
*/
keybrd_pwr_consumers[0].dev_name =
dev_name(&ams_delta_serio_device.dev);
/*
* Once consumer supply entries are populated with dev_names,
* register regulator devices. At this stage only the keyboard
* power regulator has its consumer supply table fully populated.
*/
platform_device_register(&keybrd_pwr_device);
/*
* As soon as GPIO consumers have been registered, assign
* their dev_names to respective GPIO lookup tables.
*/
ams_delta_audio_gpio_table.dev_id =
dev_name(&ams_delta_audio_device.dev);
keybrd_pwr_gpio_table.dev_id = dev_name(&keybrd_pwr_device.dev);
ams_delta_nand_gpio_table.dev_id = dev_name(&ams_delta_nand_device.dev);
ams_delta_lcd_gpio_table.dev_id = dev_name(&ams_delta_lcd_device.dev);
/*
* Once GPIO lookup tables are populated with dev_names, register them.
*/
gpiod_add_lookup_tables(ams_delta_gpio_tables,
ARRAY_SIZE(ams_delta_gpio_tables));
leds_pdev = gpio_led_register_device(PLATFORM_DEVID_NONE, &leds_pdata);
if (!IS_ERR_OR_NULL(leds_pdev)) {
leds_gpio_table.dev_id = dev_name(&leds_pdev->dev);
gpiod_add_lookup_table(&leds_gpio_table);
}
omap_writew(omap_readw(ARM_RSTCT1) | 0x0004, ARM_RSTCT1);
omapfb_set_lcd_config(&ams_delta_lcd_config);
}
static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
{
struct modem_private_data *priv = port->private_data;
int ret;
if (!priv)
return;
if (IS_ERR(priv->regulator))
return;
if (state == old)
return;
if (state == 0)
ret = regulator_enable(priv->regulator);
else if (old == 0)
ret = regulator_disable(priv->regulator);
else
ret = 0;
if (ret)
dev_warn(port->dev,
"ams_delta modem_pm: failed to %sable regulator: %d\n",
state ? "dis" : "en", ret);
}
static struct plat_serial8250_port ams_delta_modem_ports[] = {
{
.membase = IOMEM(MODEM_VIRT),
.mapbase = MODEM_PHYS,
.irq = IRQ_NOTCONNECTED, /* changed later */
.flags = UPF_BOOT_AUTOCONF,
.irqflags = IRQF_TRIGGER_RISING,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = BASE_BAUD * 16,
.pm = modem_pm,
.private_data = &modem_priv,
},
{ },
};
static struct platform_device ams_delta_modem_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM1,
.dev = {
.platform_data = ams_delta_modem_ports,
},
};
static int __init modem_nreset_init(void)
{
int err;
err = platform_device_register(&modem_nreset_device);
if (err)
pr_err("Couldn't register the modem regulator device\n");
return err;
}
/*
* This function expects MODEM IRQ number already assigned to the port.
* The MODEM device requires its RESET# pin kept high during probe.
* That requirement can be fulfilled in several ways:
* - with a descriptor of already functional modem_nreset regulator
* assigned to the MODEM private data,
 * - with the regulator not yet controlled by the modem_pm function but
* already enabled by default on probe,
* - before the modem_nreset regulator is probed, with the pin already
* set high explicitly.
* The last one is already guaranteed by ams_delta_latch2_init() called
* from machine_init.
 * In order to avoid taking over the ttyS0 device slot, the MODEM device
 * should be registered after the OMAP serial ports. Since those ports
 * are registered at arch_initcall, this function can safely be called
 * at arch_initcall_sync at the earliest.
*/
static int __init ams_delta_modem_init(void)
{
if (!machine_is_ams_delta())
return -ENODEV;
omap_cfg_reg(M14_1510_GPIO2);
/* Initialize the modem_nreset regulator consumer before use */
modem_priv.regulator = ERR_PTR(-ENODEV);
return platform_device_register(&ams_delta_modem_device);
}
arch_initcall_sync(ams_delta_modem_init);
static int __init late_init(void)
{
int err;
err = modem_nreset_init();
if (err)
return err;
/*
* Once the modem device is registered, the modem_nreset
* regulator can be requested on behalf of that device.
*/
modem_priv.regulator = regulator_get(&ams_delta_modem_device.dev,
"RESET#");
if (IS_ERR(modem_priv.regulator)) {
err = PTR_ERR(modem_priv.regulator);
goto unregister;
}
return 0;
unregister:
platform_device_unregister(&ams_delta_modem_device);
return err;
}
static void __init ams_delta_init_late(void)
{
omap1_init_late();
late_init();
}
static void __init ams_delta_map_io(void)
{
omap1_map_io();
iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
}
MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
/* Maintainer: Jonathan McDowell <[email protected]> */
.atag_offset = 0x100,
.map_io = ams_delta_map_io,
.init_early = omap1_init_early,
.init_irq = omap1_init_irq,
.init_machine = ams_delta_init,
.init_late = ams_delta_init_late,
.init_time = omap1_timer_init,
.restart = omap1_restart,
MACHINE_END
| linux-master | arch/arm/mach-omap1/board-ams-delta.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/arch/arm/mach-omap1/mux.c
*
* OMAP1 pin multiplexing configurations
*
* Copyright (C) 2003 - 2008 Nokia Corporation
*
* Written by Tony Lindgren
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/soc/ti/omap1-io.h>
#include "hardware.h"
#include "mux.h"
#ifdef CONFIG_OMAP_MUX
static struct omap_mux_cfg arch_mux_cfg;
#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
static struct pin_config omap1xxx_pins[] = {
/*
* description mux mode mux pull pull pull pu_pd pu dbg
* reg offset mode reg bit ena reg
*/
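	/*
	 * How to read an entry (a sketch inferred from omap1_cfg_reg()
	 * below rather than from the TRM): for example
	 * MUX_CFG("UART1_TX", 9, 21, 1, 2, 3, 0, NA, 0, 0) selects mux
	 * mode 1 by rewriting the 3-bit field at bit offset 21 of
	 * functional mux register 9, and sets bit 3 of pull register 2
	 * (pull ena = 0; a high bit means the pull is disabled).
	 */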
MUX_CFG("UART1_TX", 9, 21, 1, 2, 3, 0, NA, 0, 0)
MUX_CFG("UART1_RTS", 9, 12, 1, 2, 0, 0, NA, 0, 0)
/* UART2 (COM_UART_GATING), conflicts with USB2 */
MUX_CFG("UART2_TX", C, 27, 1, 3, 3, 0, NA, 0, 0)
MUX_CFG("UART2_RX", C, 18, 0, 3, 1, 1, NA, 0, 0)
MUX_CFG("UART2_CTS", C, 21, 0, 3, 1, 1, NA, 0, 0)
MUX_CFG("UART2_RTS", C, 24, 1, 3, 2, 0, NA, 0, 0)
/* UART3 (GIGA_UART_GATING) */
MUX_CFG("UART3_TX", 6, 0, 1, 0, 30, 0, NA, 0, 0)
MUX_CFG("UART3_RX", 6, 3, 0, 0, 31, 1, NA, 0, 0)
MUX_CFG("UART3_CTS", 5, 12, 2, 0, 24, 0, NA, 0, 0)
MUX_CFG("UART3_RTS", 5, 15, 2, 0, 25, 0, NA, 0, 0)
MUX_CFG("UART3_CLKREQ", 9, 27, 0, 2, 5, 0, NA, 0, 0)
MUX_CFG("UART3_BCLK", A, 0, 0, 2, 6, 0, NA, 0, 0)
MUX_CFG("Y15_1610_UART3_RTS", A, 0, 1, 2, 6, 0, NA, 0, 0)
/* PWT & PWL, conflicts with UART3 */
MUX_CFG("PWT", 6, 0, 2, 0, 30, 0, NA, 0, 0)
MUX_CFG("PWL", 6, 3, 1, 0, 31, 1, NA, 0, 0)
/* USB internal master generic */
MUX_CFG("R18_USB_VBUS", 7, 9, 2, 1, 11, 0, NA, 0, 1)
MUX_CFG("R18_1510_USB_GPIO0", 7, 9, 0, 1, 11, 1, NA, 0, 1)
/* works around erratum: W4_USB_PUEN and W4_USB_PUDIS are switched! */
MUX_CFG("W4_USB_PUEN", D, 3, 3, 3, 5, 1, NA, 0, 1)
MUX_CFG("W4_USB_CLKO", D, 3, 1, 3, 5, 0, NA, 0, 1)
MUX_CFG("W4_USB_HIGHZ", D, 3, 4, 3, 5, 0, 3, 0, 1)
MUX_CFG("W4_GPIO58", D, 3, 7, 3, 5, 0, 3, 0, 1)
/* USB1 master */
MUX_CFG("USB1_SUSP", 8, 27, 2, 1, 27, 0, NA, 0, 1)
MUX_CFG("USB1_SE0", 9, 0, 2, 1, 28, 0, NA, 0, 1)
MUX_CFG("W13_1610_USB1_SE0", 9, 0, 4, 1, 28, 0, NA, 0, 1)
MUX_CFG("USB1_TXEN", 9, 3, 2, 1, 29, 0, NA, 0, 1)
MUX_CFG("USB1_TXD", 9, 24, 1, 2, 4, 0, NA, 0, 1)
MUX_CFG("USB1_VP", A, 3, 1, 2, 7, 0, NA, 0, 1)
MUX_CFG("USB1_VM", A, 6, 1, 2, 8, 0, NA, 0, 1)
MUX_CFG("USB1_RCV", A, 9, 1, 2, 9, 0, NA, 0, 1)
MUX_CFG("USB1_SPEED", A, 12, 2, 2, 10, 0, NA, 0, 1)
MUX_CFG("R13_1610_USB1_SPEED", A, 12, 5, 2, 10, 0, NA, 0, 1)
MUX_CFG("R13_1710_USB1_SEO", A, 12, 5, 2, 10, 0, NA, 0, 1)
/* USB2 master */
MUX_CFG("USB2_SUSP", B, 3, 1, 2, 17, 0, NA, 0, 1)
MUX_CFG("USB2_VP", B, 6, 1, 2, 18, 0, NA, 0, 1)
MUX_CFG("USB2_TXEN", B, 9, 1, 2, 19, 0, NA, 0, 1)
MUX_CFG("USB2_VM", C, 18, 1, 3, 0, 0, NA, 0, 1)
MUX_CFG("USB2_RCV", C, 21, 1, 3, 1, 0, NA, 0, 1)
MUX_CFG("USB2_SE0", C, 24, 2, 3, 2, 0, NA, 0, 1)
MUX_CFG("USB2_TXD", C, 27, 2, 3, 3, 0, NA, 0, 1)
/* OMAP-1510 GPIO */
MUX_CFG("R18_1510_GPIO0", 7, 9, 0, 1, 11, 1, 0, 0, 1)
MUX_CFG("R19_1510_GPIO1", 7, 6, 0, 1, 10, 1, 0, 0, 1)
MUX_CFG("M14_1510_GPIO2", 7, 3, 0, 1, 9, 1, 0, 0, 1)
/* OMAP1610 GPIO */
MUX_CFG("P18_1610_GPIO3", 7, 0, 0, 1, 8, 0, NA, 0, 1)
MUX_CFG("Y15_1610_GPIO17", A, 0, 7, 2, 6, 0, NA, 0, 1)
/* OMAP-1710 GPIO */
MUX_CFG("R18_1710_GPIO0", 7, 9, 0, 1, 11, 1, 1, 1, 1)
MUX_CFG("V2_1710_GPIO10", F, 27, 1, 4, 3, 1, 4, 1, 1)
MUX_CFG("N21_1710_GPIO14", 6, 9, 0, 1, 1, 1, 1, 1, 1)
MUX_CFG("W15_1710_GPIO40", 9, 27, 7, 2, 5, 1, 2, 1, 1)
/* MPUIO */
MUX_CFG("MPUIO2", 7, 18, 0, 1, 14, 1, NA, 0, 1)
MUX_CFG("N15_1610_MPUIO2", 7, 18, 0, 1, 14, 1, 1, 0, 1)
MUX_CFG("MPUIO4", 7, 15, 0, 1, 13, 1, NA, 0, 1)
MUX_CFG("MPUIO5", 7, 12, 0, 1, 12, 1, NA, 0, 1)
MUX_CFG("T20_1610_MPUIO5", 7, 12, 0, 1, 12, 0, 3, 0, 1)
MUX_CFG("W11_1610_MPUIO6", 10, 15, 2, 3, 8, 0, 3, 0, 1)
MUX_CFG("V10_1610_MPUIO7", A, 24, 2, 2, 14, 0, 2, 0, 1)
MUX_CFG("W11_1610_MPUIO9", 10, 15, 1, 3, 8, 0, 3, 0, 1)
MUX_CFG("V10_1610_MPUIO10", A, 24, 1, 2, 14, 0, 2, 0, 1)
MUX_CFG("W10_1610_MPUIO11", A, 18, 2, 2, 11, 0, 2, 0, 1)
MUX_CFG("E20_1610_MPUIO13", 3, 21, 1, 0, 7, 0, 0, 0, 1)
MUX_CFG("U20_1610_MPUIO14", 9, 6, 6, 0, 30, 0, 0, 0, 1)
MUX_CFG("E19_1610_MPUIO15", 3, 18, 1, 0, 6, 0, 0, 0, 1)
/* MCBSP2 */
MUX_CFG("MCBSP2_CLKR", C, 6, 0, 2, 27, 1, NA, 0, 1)
MUX_CFG("MCBSP2_CLKX", C, 9, 0, 2, 29, 1, NA, 0, 1)
MUX_CFG("MCBSP2_DR", C, 0, 0, 2, 26, 1, NA, 0, 1)
MUX_CFG("MCBSP2_DX", C, 15, 0, 2, 31, 1, NA, 0, 1)
MUX_CFG("MCBSP2_FSR", C, 12, 0, 2, 30, 1, NA, 0, 1)
MUX_CFG("MCBSP2_FSX", C, 3, 0, 2, 27, 1, NA, 0, 1)
	/* MCBSP3 NOTE: Mode must be 1 for clock */
MUX_CFG("MCBSP3_CLKX", 9, 3, 1, 1, 29, 0, NA, 0, 1)
/* Misc ballouts */
MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1)
MUX_CFG("N20_HDQ", 6, 18, 1, 1, 4, 0, 1, 4, 0)
/* OMAP-1610 MMC2 */
MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1)
MUX_CFG("V8_1610_MMC2_DAT1", B, 27, 6, 2, 25, 1, 2, 1, 1)
MUX_CFG("W15_1610_MMC2_DAT2", 9, 12, 6, 2, 5, 1, 2, 1, 1)
MUX_CFG("R10_1610_MMC2_DAT3", B, 18, 6, 2, 22, 1, 2, 1, 1)
MUX_CFG("Y10_1610_MMC2_CLK", B, 3, 6, 2, 17, 0, 2, 0, 1)
MUX_CFG("Y8_1610_MMC2_CMD", B, 24, 6, 2, 24, 1, 2, 1, 1)
MUX_CFG("V9_1610_MMC2_CMDDIR", B, 12, 6, 2, 20, 0, 2, 1, 1)
MUX_CFG("V5_1610_MMC2_DATDIR0", B, 15, 6, 2, 21, 0, 2, 1, 1)
MUX_CFG("W19_1610_MMC2_DATDIR1", 8, 15, 6, 1, 23, 0, 1, 1, 1)
MUX_CFG("R18_1610_MMC2_CLKIN", 7, 9, 6, 1, 11, 0, 1, 11, 1)
/* OMAP-1610 External Trace Interface */
MUX_CFG("M19_1610_ETM_PSTAT0", 5, 27, 1, 0, 29, 0, 0, 0, 1)
MUX_CFG("L15_1610_ETM_PSTAT1", 5, 24, 1, 0, 28, 0, 0, 0, 1)
MUX_CFG("L18_1610_ETM_PSTAT2", 5, 21, 1, 0, 27, 0, 0, 0, 1)
MUX_CFG("L19_1610_ETM_D0", 5, 18, 1, 0, 26, 0, 0, 0, 1)
MUX_CFG("J19_1610_ETM_D6", 5, 0, 1, 0, 20, 0, 0, 0, 1)
MUX_CFG("J18_1610_ETM_D7", 5, 27, 1, 0, 19, 0, 0, 0, 1)
/* OMAP16XX GPIO */
MUX_CFG("P20_1610_GPIO4", 6, 27, 0, 1, 7, 0, 1, 1, 1)
MUX_CFG("V9_1610_GPIO7", B, 12, 1, 2, 20, 0, 2, 1, 1)
MUX_CFG("W8_1610_GPIO9", B, 21, 0, 2, 23, 0, 2, 1, 1)
MUX_CFG("N20_1610_GPIO11", 6, 18, 0, 1, 4, 0, 1, 1, 1)
MUX_CFG("N19_1610_GPIO13", 6, 12, 0, 1, 2, 0, 1, 1, 1)
MUX_CFG("P10_1610_GPIO22", C, 0, 7, 2, 26, 0, 2, 1, 1)
MUX_CFG("V5_1610_GPIO24", B, 15, 7, 2, 21, 0, 2, 1, 1)
MUX_CFG("AA20_1610_GPIO_41", 9, 9, 7, 1, 31, 0, 1, 1, 1)
MUX_CFG("W19_1610_GPIO48", 8, 15, 7, 1, 23, 1, 1, 0, 1)
MUX_CFG("M7_1610_GPIO62", 10, 0, 0, 4, 24, 0, 4, 0, 1)
MUX_CFG("V14_16XX_GPIO37", 9, 18, 7, 2, 2, 0, 2, 2, 0)
MUX_CFG("R9_16XX_GPIO18", C, 18, 7, 3, 0, 0, 3, 0, 0)
MUX_CFG("L14_16XX_GPIO49", 6, 3, 7, 0, 31, 0, 0, 31, 0)
/* OMAP-1610 uWire */
MUX_CFG("V19_1610_UWIRE_SCLK", 8, 6, 0, 1, 20, 0, 1, 1, 1)
MUX_CFG("U18_1610_UWIRE_SDI", 8, 0, 0, 1, 18, 0, 1, 1, 1)
MUX_CFG("W21_1610_UWIRE_SDO", 8, 3, 0, 1, 19, 0, 1, 1, 1)
MUX_CFG("N14_1610_UWIRE_CS0", 8, 9, 1, 1, 21, 0, 1, 1, 1)
MUX_CFG("P15_1610_UWIRE_CS3", 8, 12, 1, 1, 22, 0, 1, 1, 1)
MUX_CFG("N15_1610_UWIRE_CS1", 7, 18, 2, 1, 14, 0, NA, 0, 1)
/* OMAP-1610 SPI */
MUX_CFG("U19_1610_SPIF_SCK", 7, 21, 6, 1, 15, 0, 1, 1, 1)
MUX_CFG("U18_1610_SPIF_DIN", 8, 0, 6, 1, 18, 1, 1, 0, 1)
MUX_CFG("P20_1610_SPIF_DIN", 6, 27, 4, 1, 7, 1, 1, 0, 1)
MUX_CFG("W21_1610_SPIF_DOUT", 8, 3, 6, 1, 19, 0, 1, 0, 1)
MUX_CFG("R18_1610_SPIF_DOUT", 7, 9, 3, 1, 11, 0, 1, 0, 1)
MUX_CFG("N14_1610_SPIF_CS0", 8, 9, 6, 1, 21, 0, 1, 1, 1)
MUX_CFG("N15_1610_SPIF_CS1", 7, 18, 6, 1, 14, 0, 1, 1, 1)
MUX_CFG("T19_1610_SPIF_CS2", 7, 15, 4, 1, 13, 0, 1, 1, 1)
MUX_CFG("P15_1610_SPIF_CS3", 8, 12, 3, 1, 22, 0, 1, 1, 1)
/* OMAP-1610 Flash */
MUX_CFG("L3_1610_FLASH_CS2B_OE",10, 6, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("M8_1610_FLASH_CS2B_WE",10, 3, 1, NA, 0, 0, NA, 0, 1)
/* First MMC interface, same on 1510, 1610 and 1710 */
MUX_CFG("MMC_CMD", A, 27, 0, 2, 15, 1, 2, 1, 1)
MUX_CFG("MMC_DAT1", A, 24, 0, 2, 14, 1, 2, 1, 1)
MUX_CFG("MMC_DAT2", A, 18, 0, 2, 12, 1, 2, 1, 1)
MUX_CFG("MMC_DAT0", B, 0, 0, 2, 16, 1, 2, 1, 1)
MUX_CFG("MMC_CLK", A, 21, 0, NA, 0, 0, NA, 0, 1)
MUX_CFG("MMC_DAT3", 10, 15, 0, 3, 8, 1, 3, 1, 1)
MUX_CFG("M15_1710_MMC_CLKI", 6, 21, 2, 0, 0, 0, NA, 0, 1)
MUX_CFG("P19_1710_MMC_CMDDIR", 6, 24, 6, 0, 0, 0, NA, 0, 1)
MUX_CFG("P20_1710_MMC_DATDIR0", 6, 27, 5, 0, 0, 0, NA, 0, 1)
/* OMAP-1610 USB0 alternate configuration */
MUX_CFG("W9_USB0_TXEN", B, 9, 5, 2, 19, 0, 2, 0, 1)
MUX_CFG("AA9_USB0_VP", B, 6, 5, 2, 18, 0, 2, 0, 1)
MUX_CFG("Y5_USB0_RCV", C, 21, 5, 3, 1, 0, 1, 0, 1)
MUX_CFG("R9_USB0_VM", C, 18, 5, 3, 0, 0, 3, 0, 1)
MUX_CFG("V6_USB0_TXD", C, 27, 5, 3, 3, 0, 3, 0, 1)
MUX_CFG("W5_USB0_SE0", C, 24, 5, 3, 2, 0, 3, 0, 1)
MUX_CFG("V9_USB0_SPEED", B, 12, 5, 2, 20, 0, 2, 0, 1)
MUX_CFG("Y10_USB0_SUSP", B, 3, 5, 2, 17, 0, 2, 0, 1)
/* USB2 interface */
MUX_CFG("W9_USB2_TXEN", B, 9, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("AA9_USB2_VP", B, 6, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("Y5_USB2_RCV", C, 21, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("R9_USB2_VM", C, 18, 1, NA, 0, 0, NA, 0, 1)
MUX_CFG("V6_USB2_TXD", C, 27, 2, NA, 0, 0, NA, 0, 1)
MUX_CFG("W5_USB2_SE0", C, 24, 2, NA, 0, 0, NA, 0, 1)
/* 16XX UART */
MUX_CFG("R13_1610_UART1_TX", A, 12, 6, 2, 10, 0, 2, 10, 1)
MUX_CFG("V14_16XX_UART1_RX", 9, 18, 0, 2, 2, 0, 2, 2, 1)
MUX_CFG("R14_1610_UART1_CTS", 9, 15, 0, 2, 1, 0, 2, 1, 1)
MUX_CFG("AA15_1610_UART1_RTS", 9, 12, 1, 2, 0, 0, 2, 0, 1)
MUX_CFG("R9_16XX_UART2_RX", C, 18, 0, 3, 0, 0, 3, 0, 1)
MUX_CFG("L14_16XX_UART3_RX", 6, 3, 0, 0, 31, 0, 0, 31, 1)
/* I2C interface */
MUX_CFG("I2C_SCL", 7, 24, 0, NA, 0, 0, NA, 0, 0)
MUX_CFG("I2C_SDA", 7, 27, 0, NA, 0, 0, NA, 0, 0)
/* Keypad */
MUX_CFG("F18_1610_KBC0", 3, 15, 0, 0, 5, 1, 0, 0, 0)
MUX_CFG("D20_1610_KBC1", 3, 12, 0, 0, 4, 1, 0, 0, 0)
MUX_CFG("D19_1610_KBC2", 3, 9, 0, 0, 3, 1, 0, 0, 0)
MUX_CFG("E18_1610_KBC3", 3, 6, 0, 0, 2, 1, 0, 0, 0)
MUX_CFG("C21_1610_KBC4", 3, 3, 0, 0, 1, 1, 0, 0, 0)
MUX_CFG("G18_1610_KBR0", 4, 0, 0, 0, 10, 1, 0, 1, 0)
MUX_CFG("F19_1610_KBR1", 3, 27, 0, 0, 9, 1, 0, 1, 0)
MUX_CFG("H14_1610_KBR2", 3, 24, 0, 0, 8, 1, 0, 1, 0)
MUX_CFG("E20_1610_KBR3", 3, 21, 0, 0, 7, 1, 0, 1, 0)
MUX_CFG("E19_1610_KBR4", 3, 18, 0, 0, 6, 1, 0, 1, 0)
MUX_CFG("N19_1610_KBR5", 6, 12, 1, 1, 2, 1, 1, 1, 0)
/* Power management */
MUX_CFG("T20_1610_LOW_PWR", 7, 12, 1, NA, 0, 0, NA, 0, 0)
/* MCLK Settings */
MUX_CFG("V5_1710_MCLK_ON", B, 15, 0, NA, 0, 0, NA, 0, 0)
MUX_CFG("V5_1710_MCLK_OFF", B, 15, 6, NA, 0, 0, NA, 0, 0)
MUX_CFG("R10_1610_MCLK_ON", B, 18, 0, NA, 22, 0, NA, 1, 0)
MUX_CFG("R10_1610_MCLK_OFF", B, 18, 6, 2, 22, 1, 2, 1, 1)
/* CompactFlash controller, conflicts with MMC1 */
MUX_CFG("P11_1610_CF_CD2", A, 27, 3, 2, 15, 1, 2, 1, 1)
MUX_CFG("R11_1610_CF_IOIS16", B, 0, 3, 2, 16, 1, 2, 1, 1)
MUX_CFG("V10_1610_CF_IREQ", A, 24, 3, 2, 14, 0, 2, 0, 1)
MUX_CFG("W10_1610_CF_RESET", A, 18, 3, 2, 12, 1, 2, 1, 1)
MUX_CFG("W11_1610_CF_CD1", 10, 15, 3, 3, 8, 1, 3, 1, 1)
/* parallel camera */
MUX_CFG("J15_1610_CAM_LCLK", 4, 24, 0, 0, 18, 1, 0, 0, 0)
MUX_CFG("J18_1610_CAM_D7", 4, 27, 0, 0, 19, 1, 0, 0, 0)
MUX_CFG("J19_1610_CAM_D6", 5, 0, 0, 0, 20, 1, 0, 0, 0)
MUX_CFG("J14_1610_CAM_D5", 5, 3, 0, 0, 21, 1, 0, 0, 0)
MUX_CFG("K18_1610_CAM_D4", 5, 6, 0, 0, 22, 1, 0, 0, 0)
MUX_CFG("K19_1610_CAM_D3", 5, 9, 0, 0, 23, 1, 0, 0, 0)
MUX_CFG("K15_1610_CAM_D2", 5, 12, 0, 0, 24, 1, 0, 0, 0)
MUX_CFG("K14_1610_CAM_D1", 5, 15, 0, 0, 25, 1, 0, 0, 0)
MUX_CFG("L19_1610_CAM_D0", 5, 18, 0, 0, 26, 1, 0, 0, 0)
MUX_CFG("L18_1610_CAM_VS", 5, 21, 0, 0, 27, 1, 0, 0, 0)
MUX_CFG("L15_1610_CAM_HS", 5, 24, 0, 0, 28, 1, 0, 0, 0)
MUX_CFG("M19_1610_CAM_RSTZ", 5, 27, 0, 0, 29, 0, 0, 0, 0)
MUX_CFG("Y15_1610_CAM_OUTCLK", A, 0, 6, 2, 6, 0, 2, 0, 0)
/* serial camera */
MUX_CFG("H19_1610_CAM_EXCLK", 4, 21, 0, 0, 17, 0, 0, 0, 0)
/* REVISIT 5912 spec sez CCP_* can't pullup or pulldown ... ? */
MUX_CFG("Y12_1610_CCP_CLKP", 8, 18, 6, 1, 24, 1, 1, 0, 0)
MUX_CFG("W13_1610_CCP_CLKM", 9, 0, 6, 1, 28, 1, 1, 0, 0)
MUX_CFG("W14_1610_CCP_DATAP", 9, 24, 6, 2, 4, 1, 2, 0, 0)
MUX_CFG("Y14_1610_CCP_DATAM", 9, 21, 6, 2, 3, 1, 2, 0, 0)
};
#define OMAP1XXX_PINS_SZ ARRAY_SIZE(omap1xxx_pins)
#else
#define omap1xxx_pins NULL
#define OMAP1XXX_PINS_SZ 0
#endif /* CONFIG_ARCH_OMAP15XX || CONFIG_ARCH_OMAP16XX */
static int omap1_cfg_reg(const struct pin_config *cfg)
{
static DEFINE_SPINLOCK(mux_spin_lock);
unsigned long flags;
unsigned int reg_orig = 0, reg = 0, pu_pd_orig = 0, pu_pd = 0,
pull_orig = 0, pull = 0;
unsigned int mask, warn = 0;
/* Check the mux register in question */
if (cfg->mux_reg) {
unsigned tmp1, tmp2;
spin_lock_irqsave(&mux_spin_lock, flags);
reg_orig = omap_readl(cfg->mux_reg);
/* The mux registers always seem to be 3 bits long */
mask = (0x7 << cfg->mask_offset);
tmp1 = reg_orig & mask;
reg = reg_orig & ~mask;
tmp2 = (cfg->mask << cfg->mask_offset);
reg |= tmp2;
if (tmp1 != tmp2)
warn = 1;
omap_writel(reg, cfg->mux_reg);
spin_unlock_irqrestore(&mux_spin_lock, flags);
}
/* Check for pull up or pull down selection on 1610 */
if (!cpu_is_omap15xx()) {
if (cfg->pu_pd_reg && cfg->pull_val) {
spin_lock_irqsave(&mux_spin_lock, flags);
pu_pd_orig = omap_readl(cfg->pu_pd_reg);
mask = 1 << cfg->pull_bit;
if (cfg->pu_pd_val) {
if (!(pu_pd_orig & mask))
warn = 1;
/* Use pull up */
pu_pd = pu_pd_orig | mask;
} else {
if (pu_pd_orig & mask)
warn = 1;
/* Use pull down */
pu_pd = pu_pd_orig & ~mask;
}
omap_writel(pu_pd, cfg->pu_pd_reg);
spin_unlock_irqrestore(&mux_spin_lock, flags);
}
}
/* Check for an associated pull down register */
if (cfg->pull_reg) {
spin_lock_irqsave(&mux_spin_lock, flags);
pull_orig = omap_readl(cfg->pull_reg);
mask = 1 << cfg->pull_bit;
if (cfg->pull_val) {
if (pull_orig & mask)
warn = 1;
/* Low bit = pull enabled */
pull = pull_orig & ~mask;
} else {
if (!(pull_orig & mask))
warn = 1;
/* High bit = pull disabled */
pull = pull_orig | mask;
}
omap_writel(pull, cfg->pull_reg);
spin_unlock_irqrestore(&mux_spin_lock, flags);
}
if (warn) {
#ifdef CONFIG_OMAP_MUX_WARNINGS
printk(KERN_WARNING "MUX: initialized %s\n", cfg->name);
#endif
}
#ifdef CONFIG_OMAP_MUX_DEBUG
if (cfg->debug || warn) {
printk("MUX: Setting register %s\n", cfg->name);
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->mux_reg_name, cfg->mux_reg, reg_orig, reg);
if (!cpu_is_omap15xx()) {
if (cfg->pu_pd_reg && cfg->pull_val) {
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->pu_pd_name, cfg->pu_pd_reg,
pu_pd_orig, pu_pd);
}
}
if (cfg->pull_reg)
printk(" %s (0x%08x) = 0x%08x -> 0x%08x\n",
cfg->pull_name, cfg->pull_reg, pull_orig, pull);
}
#endif
#ifdef CONFIG_OMAP_MUX_WARNINGS
return warn ? -ETXTBSY : 0;
#else
return 0;
#endif
}
static struct omap_mux_cfg *mux_cfg;
int __init omap_mux_register(struct omap_mux_cfg *arch_mux_cfg)
{
if (!arch_mux_cfg || !arch_mux_cfg->pins || arch_mux_cfg->size == 0
|| !arch_mux_cfg->cfg_reg) {
printk(KERN_ERR "Invalid pin table\n");
return -EINVAL;
}
mux_cfg = arch_mux_cfg;
return 0;
}
/*
* Sets the Omap MUX and PULL_DWN registers based on the table
*/
int omap_cfg_reg(const unsigned long index)
{
struct pin_config *reg;
if (!cpu_class_is_omap1()) {
printk(KERN_ERR "mux: Broken omap_cfg_reg(%lu) entry\n",
index);
WARN_ON(1);
return -EINVAL;
}
if (mux_cfg == NULL) {
printk(KERN_ERR "Pin mux table not initialized\n");
return -ENODEV;
}
if (index >= mux_cfg->size) {
printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
index, mux_cfg->size);
dump_stack();
return -ENODEV;
}
reg = &mux_cfg->pins[index];
if (!mux_cfg->cfg_reg)
return -ENODEV;
return mux_cfg->cfg_reg(reg);
}
EXPORT_SYMBOL(omap_cfg_reg);
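/*
 * Typical board code usage (a sketch; the index values are the pin names
 * from the table above, exposed as enumerators by mux.h):
 *
 *	omap_cfg_reg(UART1_TX);
 *	omap_cfg_reg(UART1_RTS);
 */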
int __init omap1_mux_init(void)
{
if (cpu_is_omap15xx() || cpu_is_omap16xx()) {
arch_mux_cfg.pins = omap1xxx_pins;
arch_mux_cfg.size = OMAP1XXX_PINS_SZ;
arch_mux_cfg.cfg_reg = omap1_cfg_reg;
}
return omap_mux_register(&arch_mux_cfg);
}
#else
#define omap_mux_init() do {} while(0)
#define omap_cfg_reg(x) do {} while(0)
#endif /* CONFIG_OMAP_MUX */
| linux-master | arch/arm/mach-omap1/mux.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/board-palmte.c
*
* Modified from board-generic.c
*
* Support for the Palm Tungsten E PDA.
*
* Original version : Laurent Gonzalez
*
* Maintainers : http://palmtelinux.sf.net
* [email protected]
*
* Copyright (c) 2006 Andrzej Zaborowski <[email protected]>
*/
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/apm-emulation.h>
#include <linux/omapfb.h>
#include <linux/omap-dma.h>
#include <linux/platform_data/keypad-omap.h>
#include <linux/platform_data/omap1_bl.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "tc.h"
#include "flash.h"
#include "mux.h"
#include "hardware.h"
#include "usb.h"
#include "mmc.h"
#include "common.h"
#define PALMTE_USBDETECT_GPIO 0
#define PALMTE_USB_OR_DC_GPIO 1
#define PALMTE_TSC_GPIO 4
#define PALMTE_PINTDAV_GPIO 6
#define PALMTE_MMC_WP_GPIO 8
#define PALMTE_MMC_POWER_GPIO 9
#define PALMTE_HDQ_GPIO 11
#define PALMTE_HEADPHONES_GPIO 14
#define PALMTE_SPEAKER_GPIO 15
#define PALMTE_DC_GPIO OMAP_MPUIO(2)
#define PALMTE_MMC_SWITCH_GPIO OMAP_MPUIO(4)
#define PALMTE_MMC1_GPIO OMAP_MPUIO(6)
#define PALMTE_MMC2_GPIO OMAP_MPUIO(7)
#define PALMTE_MMC3_GPIO OMAP_MPUIO(11)
static const unsigned int palmte_keymap[] = {
KEY(0, 0, KEY_F1), /* Calendar */
KEY(1, 0, KEY_F2), /* Contacts */
KEY(2, 0, KEY_F3), /* Tasks List */
KEY(3, 0, KEY_F4), /* Note Pad */
KEY(4, 0, KEY_POWER),
KEY(0, 1, KEY_LEFT),
KEY(1, 1, KEY_DOWN),
KEY(2, 1, KEY_UP),
KEY(3, 1, KEY_RIGHT),
KEY(4, 1, KEY_ENTER),
};
static const struct matrix_keymap_data palmte_keymap_data = {
.keymap = palmte_keymap,
.keymap_size = ARRAY_SIZE(palmte_keymap),
};
static struct omap_kp_platform_data palmte_kp_data = {
.rows = 8,
.cols = 8,
.keymap_data = &palmte_keymap_data,
.rep = true,
.delay = 12,
};
static struct resource palmte_kp_resources[] = {
[0] = {
.start = INT_KEYBOARD,
.end = INT_KEYBOARD,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device palmte_kp_device = {
.name = "omap-keypad",
.id = -1,
.dev = {
.platform_data = &palmte_kp_data,
},
.num_resources = ARRAY_SIZE(palmte_kp_resources),
.resource = palmte_kp_resources,
};
static struct mtd_partition palmte_rom_partitions[] = {
/* PalmOS "Small ROM", contains the bootloader and the debugger */
{
.name = "smallrom",
.offset = 0,
.size = 0xa000,
.mask_flags = MTD_WRITEABLE,
},
/* PalmOS "Big ROM", a filesystem with all the OS code and data */
{
.name = "bigrom",
.offset = SZ_128K,
/*
* 0x5f0000 bytes big in the multi-language ("EFIGS") version,
* 0x7b0000 bytes in the English-only ("enUS") version.
*/
.size = 0x7b0000,
.mask_flags = MTD_WRITEABLE,
},
};
static struct physmap_flash_data palmte_rom_data = {
.width = 2,
.set_vpp = omap1_set_vpp,
.parts = palmte_rom_partitions,
.nr_parts = ARRAY_SIZE(palmte_rom_partitions),
};
static struct resource palmte_rom_resource = {
.start = OMAP_CS0_PHYS,
.end = OMAP_CS0_PHYS + SZ_8M - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device palmte_rom_device = {
.name = "physmap-flash",
.id = -1,
.dev = {
.platform_data = &palmte_rom_data,
},
.num_resources = 1,
.resource = &palmte_rom_resource,
};
static struct platform_device palmte_lcd_device = {
.name = "lcd_palmte",
.id = -1,
};
static struct omap_backlight_config palmte_backlight_config = {
.default_intensity = 0xa0,
};
static struct platform_device palmte_backlight_device = {
.name = "omap-bl",
.id = -1,
.dev = {
.platform_data = &palmte_backlight_config,
},
};
static struct platform_device *palmte_devices[] __initdata = {
&palmte_rom_device,
&palmte_kp_device,
&palmte_lcd_device,
&palmte_backlight_device,
};
static struct omap_usb_config palmte_usb_config __initdata = {
.register_dev = 1, /* Mini-B only receptacle */
.hmc_mode = 0,
.pins[0] = 2,
};
static const struct omap_lcd_config palmte_lcd_config __initconst = {
.ctrl_name = "internal",
};
static struct spi_board_info palmte_spi_info[] __initdata = {
{
.modalias = "tsc2102",
.bus_num = 2, /* uWire (officially) */
.chip_select = 0, /* As opposed to 3 */
.max_speed_hz = 8000000,
},
};
#if IS_ENABLED(CONFIG_MMC_OMAP)
static struct omap_mmc_platform_data _palmte_mmc_config = {
.nr_slots = 1,
.slots[0] = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.name = "mmcblk",
},
};
static struct omap_mmc_platform_data *palmte_mmc_config[OMAP15XX_NR_MMC] = {
[0] = &_palmte_mmc_config,
};
static void palmte_mmc_init(void)
{
omap1_init_mmc(palmte_mmc_config, OMAP15XX_NR_MMC);
}
#else /* CONFIG_MMC_OMAP */
static void palmte_mmc_init(void)
{
}
#endif /* CONFIG_MMC_OMAP */
static struct gpiod_lookup_table palmte_irq_gpio_table = {
.dev_id = NULL,
.table = {
/* GPIO used for TSC2102 PINTDAV IRQ */
GPIO_LOOKUP("gpio-0-15", PALMTE_PINTDAV_GPIO, "tsc2102_irq",
GPIO_ACTIVE_HIGH),
/* GPIO used for USB or DC input detection */
GPIO_LOOKUP("gpio-0-15", PALMTE_USB_OR_DC_GPIO, "usb_dc_irq",
GPIO_ACTIVE_HIGH),
{ }
},
};
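/*
 * With .dev_id set to NULL, the entries above are matched by consumer
 * requests that pass a NULL device, which is how omap_palmte_init() below
 * fetches both IRQ lines via gpiod_get(NULL, ...).
 */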
static void __init omap_palmte_init(void)
{
struct gpio_desc *d;
/* mux pins for uarts */
omap_cfg_reg(UART1_TX);
omap_cfg_reg(UART1_RTS);
omap_cfg_reg(UART2_TX);
omap_cfg_reg(UART2_RTS);
omap_cfg_reg(UART3_TX);
omap_cfg_reg(UART3_RX);
platform_add_devices(palmte_devices, ARRAY_SIZE(palmte_devices));
gpiod_add_lookup_table(&palmte_irq_gpio_table);
d = gpiod_get(NULL, "tsc2102_irq", GPIOD_IN);
if (IS_ERR(d))
pr_err("Unable to get TSC2102 IRQ GPIO descriptor\n");
else
palmte_spi_info[0].irq = gpiod_to_irq(d);
spi_register_board_info(palmte_spi_info, ARRAY_SIZE(palmte_spi_info));
/* We are getting this just to set it up as input */
d = gpiod_get(NULL, "usb_dc_irq", GPIOD_IN);
if (IS_ERR(d))
pr_err("Unable to get USB/DC IRQ GPIO descriptor\n");
else
gpiod_put(d);
omap_serial_init();
omap1_usb_init(&palmte_usb_config);
omap_register_i2c_bus(1, 100, NULL, 0);
omapfb_set_lcd_config(&palmte_lcd_config);
palmte_mmc_init();
}
MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
.atag_offset = 0x100,
.map_io = omap1_map_io,
.init_early = omap1_init_early,
.init_irq = omap1_init_irq,
.init_machine = omap_palmte_init,
.init_late = omap1_init_late,
.init_time = omap1_timer_init,
.restart = omap1_restart,
MACHINE_END
| linux-master | arch/arm/mach-omap1/board-palmte.c |
/*
* linux/arch/arm/mach-omap1/board-osk.c
*
* Board specific init for OMAP5912 OSK
*
* Written by Dirk Behme <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/smc91x.h>
#include <linux/omapfb.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mfd/tps65010.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/platform_data/omap1_bl.h>
#include <linux/soc/ti/omap1-io.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "tc.h"
#include "flash.h"
#include "mux.h"
#include "hardware.h"
#include "usb.h"
#include "common.h"
/* Name of the GPIO chip used by the OMAP for GPIOs 0..15 */
#define OMAP_GPIO_LABEL "gpio-0-15"
/* At OMAP5912 OSK the Ethernet is directly connected to CS1 */
#define OMAP_OSK_ETHR_START 0x04800300
/* TPS65010 has four GPIOs. nPG and LED2 can be treated like GPIOs with
* alternate pin configurations for hardware-controlled blinking.
*/
#define OSK_TPS_GPIO_USB_PWR_EN 0
#define OSK_TPS_GPIO_LED_D3 1
#define OSK_TPS_GPIO_LAN_RESET 2
#define OSK_TPS_GPIO_DSP_PWR_EN 3
#define OSK_TPS_GPIO_LED_D9 4
#define OSK_TPS_GPIO_LED_D2 5
static struct mtd_partition osk_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
{
.name = "bootloader",
.offset = 0,
.size = SZ_128K,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
/* bootloader params in the next sector */
{
.name = "params",
.offset = MTDPART_OFS_APPEND,
.size = SZ_128K,
.mask_flags = 0,
}, {
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = SZ_2M,
.mask_flags = 0
}, {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0
}
};
static struct physmap_flash_data osk_flash_data = {
.width = 2,
.set_vpp = omap1_set_vpp,
.parts = osk_partitions,
.nr_parts = ARRAY_SIZE(osk_partitions),
};
static struct resource osk_flash_resource = {
/* this is on CS3, wherever it's mapped */
.flags = IORESOURCE_MEM,
};
static struct platform_device osk5912_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &osk_flash_data,
},
.num_resources = 1,
.resource = &osk_flash_resource,
};
static struct smc91x_platdata osk5912_smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource osk5912_smc91x_resources[] = {
[0] = {
.start = OMAP_OSK_ETHR_START, /* Physical */
.end = OMAP_OSK_ETHR_START + 0xf,
.flags = IORESOURCE_MEM,
},
[1] = {
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
static struct platform_device osk5912_smc91x_device = {
.name = "smc91x",
.id = -1,
.dev = {
.platform_data = &osk5912_smc91x_info,
},
.num_resources = ARRAY_SIZE(osk5912_smc91x_resources),
.resource = osk5912_smc91x_resources,
};
static struct resource osk5912_cf_resources[] = {
[0] = {
.flags = IORESOURCE_IRQ,
},
[1] = {
.flags = IORESOURCE_MEM,
},
};
static struct platform_device osk5912_cf_device = {
.name = "omap_cf",
.id = -1,
.num_resources = ARRAY_SIZE(osk5912_cf_resources),
.resource = osk5912_cf_resources,
};
static struct platform_device *osk5912_devices[] __initdata = {
&osk5912_flash_device,
&osk5912_smc91x_device,
&osk5912_cf_device,
};
static const struct gpio_led tps_leds[] = {
/* NOTE: D9 and D2 have hardware blink support.
* Also, D9 requires non-battery power.
*/
{ .name = "d9", .default_trigger = "disk-activity", },
{ .name = "d2", },
{ .name = "d3", .default_trigger = "heartbeat", },
};
static struct gpiod_lookup_table tps_leds_gpio_table = {
.dev_id = "leds-gpio",
.table = {
/* Use local offsets on TPS65010 */
GPIO_LOOKUP_IDX("tps65010", OSK_TPS_GPIO_LED_D9, NULL, 0, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("tps65010", OSK_TPS_GPIO_LED_D2, NULL, 1, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("tps65010", OSK_TPS_GPIO_LED_D3, NULL, 2, GPIO_ACTIVE_LOW),
{ }
},
};
static struct gpio_led_platform_data tps_leds_data = {
.num_leds = 3,
.leds = tps_leds,
};
static struct platform_device osk5912_tps_leds = {
.name = "leds-gpio",
.id = 0,
.dev.platform_data = &tps_leds_data,
};
/* The board just holds these GPIOs hogged from setup to teardown */
static struct gpio_desc *eth_reset;
static struct gpio_desc *vdd_dsp;
static int osk_tps_setup(struct i2c_client *client, struct gpio_chip *gc)
{
struct gpio_desc *d;
if (!IS_BUILTIN(CONFIG_TPS65010))
return -ENOSYS;
/* Set GPIO 1 HIGH to disable VBUS power supply;
* OHCI driver powers it up/down as needed.
*/
d = gpiochip_request_own_desc(gc, OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en",
GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
/* Free the GPIO again as the driver will request it */
gpiochip_free_own_desc(d);
/* Set GPIO 2 high so LED D3 is off by default */
tps65010_set_gpio_out_value(GPIO2, HIGH);
/* Set GPIO 3 low to take ethernet out of reset */
eth_reset = gpiochip_request_own_desc(gc, OSK_TPS_GPIO_LAN_RESET, "smc_reset",
GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW);
/* GPIO4 is VDD_DSP */
vdd_dsp = gpiochip_request_own_desc(gc, OSK_TPS_GPIO_DSP_PWR_EN, "dsp_power",
GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
/* REVISIT if DSP support isn't configured, power it off ... */
/* Let LED1 (D9) blink; leds-gpio may override it */
tps65010_set_led(LED1, BLINK);
/* Set LED2 off by default */
tps65010_set_led(LED2, OFF);
/* Enable LOW_PWR handshake */
tps65010_set_low_pwr(ON);
/* Switch VLDO2 to 3.0V for AIC23 */
tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V
| TPS_LDO1_ENABLE);
/* register these three LEDs */
osk5912_tps_leds.dev.parent = &client->dev;
gpiod_add_lookup_table(&tps_leds_gpio_table);
platform_device_register(&osk5912_tps_leds);
return 0;
}
static void osk_tps_teardown(struct i2c_client *client, struct gpio_chip *gc)
{
gpiochip_free_own_desc(eth_reset);
gpiochip_free_own_desc(vdd_dsp);
}
static struct tps65010_board tps_board = {
.outmask = 0x0f,
.setup = osk_tps_setup,
.teardown = osk_tps_teardown,
};
static struct i2c_board_info __initdata osk_i2c_board_info[] = {
{
/* This device will get the name "i2c-tps65010" */
I2C_BOARD_INFO("tps65010", 0x48),
.dev_name = "tps65010",
.platform_data = &tps_board,
},
{
I2C_BOARD_INFO("tlv320aic23", 0x1B),
},
/* TODO when driver support is ready:
* - optionally on Mistral, ov9640 camera sensor at 0x30
*/
};
static void __init osk_init_smc91x(void)
{
u32 l;
/* Check EMIFS wait states to fix errors with SMC_GET_PKT_HDR */
l = omap_readl(EMIFS_CCS(1));
l |= 0x3;
omap_writel(l, EMIFS_CCS(1));
}
static void __init osk_init_cf(int seg)
{
struct resource *res = &osk5912_cf_resources[1];
omap_cfg_reg(M7_1610_GPIO62);
switch (seg) {
/* NOTE: CS0 could be configured too ... */
case 1:
res->start = OMAP_CS1_PHYS;
break;
case 2:
res->start = OMAP_CS2_PHYS;
break;
case 3:
res->start = omap_cs3_phys();
break;
}
res->end = res->start + SZ_8K - 1;
osk5912_cf_device.dev.platform_data = (void *)(uintptr_t)seg;
/* NOTE: better EMIFS setup might support more cards; but the
* TRM only shows how to affect regular flash signals, not their
* CF/PCMCIA variants...
*/
pr_debug("%s: cs%d, previous ccs %08x acs %08x\n", __func__,
seg, omap_readl(EMIFS_CCS(seg)), omap_readl(EMIFS_ACS(seg)));
omap_writel(0x0004a1b3, EMIFS_CCS(seg)); /* synch mode 4 etc */
omap_writel(0x00000000, EMIFS_ACS(seg)); /* OE hold/setup */
}
static struct gpiod_lookup_table osk_usb_gpio_table = {
.dev_id = "ohci",
.table = {
/* Power GPIO on the I2C-attached TPS65010 */
GPIO_LOOKUP("tps65010", OSK_TPS_GPIO_USB_PWR_EN, "power",
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent",
GPIO_ACTIVE_HIGH),
{ }
},
};
static struct omap_usb_config osk_usb_config __initdata = {
/* has usb host connector (A) ... for development it can also
* be used, with a NONSTANDARD gender-bending cable/dongle, as
* a peripheral.
*/
#if IS_ENABLED(CONFIG_USB_OMAP)
.register_dev = 1,
.hmc_mode = 0,
#else
.register_host = 1,
.hmc_mode = 16,
.rwc = 1,
#endif
.pins[0] = 2,
};
#define EMIFS_CS3_VAL (0x88013141)
static struct gpiod_lookup_table osk_irq_gpio_table = {
.dev_id = NULL,
.table = {
/* GPIO used for SMC91x IRQ */
GPIO_LOOKUP(OMAP_GPIO_LABEL, 0, "smc_irq",
GPIO_ACTIVE_HIGH),
/* GPIO used for CF IRQ */
GPIO_LOOKUP("gpio-48-63", 14, "cf_irq",
GPIO_ACTIVE_HIGH),
/* GPIO used by the TPS65010 chip */
GPIO_LOOKUP("mpuio", 1, "tps65010",
GPIO_ACTIVE_HIGH),
/* GPIOs used for serial wakeup IRQs */
GPIO_LOOKUP_IDX("gpio-32-47", 5, "wakeup", 0,
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-16-31", 2, "wakeup", 1,
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-48-63", 1, "wakeup", 2,
GPIO_ACTIVE_HIGH),
{ }
},
};
static void __init osk_init(void)
{
struct gpio_desc *d;
u32 l;
osk_init_smc91x();
osk_init_cf(2); /* CS2 */
/* Workaround for wrong CS3 (NOR flash) timing
* There are some U-Boot versions out there which configure
* wrong CS3 memory timings. This mainly leads to CRC
* or similar errors if you use NOR flash (e.g. with JFFS2)
*/
l = omap_readl(EMIFS_CCS(3));
if (l != EMIFS_CS3_VAL)
omap_writel(EMIFS_CS3_VAL, EMIFS_CCS(3));
osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys();
osk_flash_resource.end += SZ_32M - 1;
/*
* Add the GPIOs to be used as IRQs and immediately look them up
* to be passed as an IRQ resource. This is ugly but should work
* until the day we convert to device tree.
*/
gpiod_add_lookup_table(&osk_irq_gpio_table);
d = gpiod_get(NULL, "smc_irq", GPIOD_IN);
if (IS_ERR(d)) {
pr_err("Unable to get SMC IRQ GPIO descriptor\n");
} else {
irq_set_irq_type(gpiod_to_irq(d), IRQ_TYPE_EDGE_RISING);
osk5912_smc91x_resources[1] = DEFINE_RES_IRQ(gpiod_to_irq(d));
}
d = gpiod_get(NULL, "cf_irq", GPIOD_IN);
if (IS_ERR(d)) {
pr_err("Unable to get CF IRQ GPIO descriptor\n");
} else {
/* the CF I/O IRQ is really active-low */
irq_set_irq_type(gpiod_to_irq(d), IRQ_TYPE_EDGE_FALLING);
osk5912_cf_resources[0] = DEFINE_RES_IRQ(gpiod_to_irq(d));
}
platform_add_devices(osk5912_devices, ARRAY_SIZE(osk5912_devices));
l = omap_readl(USB_TRANSCEIVER_CTRL);
l |= (3 << 1);
omap_writel(l, USB_TRANSCEIVER_CTRL);
gpiod_add_lookup_table(&osk_usb_gpio_table);
omap1_usb_init(&osk_usb_config);
omap_serial_init();
/* irq for tps65010 chip */
/* bootloader effectively does: omap_cfg_reg(U19_1610_MPUIO1); */
d = gpiod_get(NULL, "tps65010", GPIOD_IN);
if (IS_ERR(d))
pr_err("Unable to get TPS65010 IRQ GPIO descriptor\n");
else
osk_i2c_board_info[0].irq = gpiod_to_irq(d);
omap_register_i2c_bus(1, 400, osk_i2c_board_info,
ARRAY_SIZE(osk_i2c_board_info));
}
MACHINE_START(OMAP_OSK, "TI-OSK")
/* Maintainer: Dirk Behme <[email protected]> */
.atag_offset = 0x100,
.map_io = omap1_map_io,
.init_early = omap1_init_early,
.init_irq = omap1_init_irq,
.init_machine = osk_init,
.init_late = omap1_init_late,
.init_time = omap1_timer_init,
.restart = omap1_restart,
MACHINE_END
| linux-master | arch/arm/mach-omap1/board-osk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-omap1/id.c
*
* OMAP1 CPU identification code
*
* Copyright (C) 2004 Nokia Corporation
* Written by Tony Lindgren <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/soc/ti/omap1-io.h>
#include <asm/system_info.h>
#include "soc.h"
#include "hardware.h"
#include "common.h"
#define OMAP_DIE_ID_0 0xfffe1800
#define OMAP_DIE_ID_1 0xfffe1804
#define OMAP_PRODUCTION_ID_0 0xfffe2000
#define OMAP_PRODUCTION_ID_1 0xfffe2004
#define OMAP32_ID_0 0xfffed400
#define OMAP32_ID_1 0xfffed404
struct omap_id {
u16 jtag_id; /* Used to determine OMAP type */
u8 die_rev; /* Processor revision */
u32 omap_id; /* OMAP revision */
u32 type; /* Cpu id bits [31:08], cpu class bits [07:00] */
};
static unsigned int omap_revision;
/* Register values to detect the OMAP version */
static struct omap_id omap_ids[] __initdata = {
{ .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000},
{ .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100},
{ .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300},
{ .jtag_id = 0xb62c, .die_rev = 0x1, .omap_id = 0x03320500, .type = 0x08500000},
{ .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000},
{ .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x16100000},
{ .jtag_id = 0xb576, .die_rev = 0x2, .omap_id = 0x03320100, .type = 0x16110000},
{ .jtag_id = 0xb576, .die_rev = 0x3, .omap_id = 0x03320100, .type = 0x16100c00},
{ .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320200, .type = 0x16100d00},
{ .jtag_id = 0xb613, .die_rev = 0x0, .omap_id = 0x03320300, .type = 0x1610ef00},
{ .jtag_id = 0xb576, .die_rev = 0x1, .omap_id = 0x03320100, .type = 0x16110000},
{ .jtag_id = 0xb58c, .die_rev = 0x2, .omap_id = 0x03320200, .type = 0x16110b00},
{ .jtag_id = 0xb58c, .die_rev = 0x3, .omap_id = 0x03320200, .type = 0x16110c00},
{ .jtag_id = 0xb65f, .die_rev = 0x0, .omap_id = 0x03320400, .type = 0x16212300},
{ .jtag_id = 0xb65f, .die_rev = 0x1, .omap_id = 0x03320400, .type = 0x16212300},
{ .jtag_id = 0xb65f, .die_rev = 0x1, .omap_id = 0x03320500, .type = 0x16212300},
{ .jtag_id = 0xb5f7, .die_rev = 0x0, .omap_id = 0x03330000, .type = 0x17100000},
{ .jtag_id = 0xb5f7, .die_rev = 0x1, .omap_id = 0x03330100, .type = 0x17100000},
{ .jtag_id = 0xb5f7, .die_rev = 0x2, .omap_id = 0x03330100, .type = 0x17100000},
};
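/*
 * Note: omap_check_revision() below matches this table progressively -
 * first on jtag_id alone, then on jtag_id + die_rev, then on all three
 * fields - so the most specific matching entry wins.
 */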
unsigned int omap_rev(void)
{
return omap_revision;
}
EXPORT_SYMBOL(omap_rev);
/*
* Get OMAP type from PROD_ID.
* 1710 has the PROD_ID in bits 15:00, not in 16:01 as documented in TRM.
* 1510 PROD_ID is empty, and 1610 PROD_ID does not make sense.
 * An undocumented register in the TEST BLOCK is used as a fallback; this
 * seems to work on 1510, 1610 & 1710. The official way will hopefully work
 * on future processors.
*/
static u16 __init omap_get_jtag_id(void)
{
u32 prod_id, omap_id;
prod_id = omap_readl(OMAP_PRODUCTION_ID_1);
omap_id = omap_readl(OMAP32_ID_1);
/* Check for unusable OMAP_PRODUCTION_ID_1 on 1611B/5912 and 730/850 */
if (((prod_id >> 20) == 0) || (prod_id == omap_id))
prod_id = 0;
else
prod_id &= 0xffff;
if (prod_id)
return prod_id;
/* Use OMAP32_ID_1 as fallback */
prod_id = ((omap_id >> 12) & 0xffff);
return prod_id;
}
/*
* Get OMAP revision from DIE_REV.
 * Early 1710 processors may have a broken OMAP_DIE_ID; it contains PROD_ID.
 * An undocumented register in the TEST BLOCK is used as a fallback.
 * REVISIT: This does not seem to work on 1510.
*/
static u8 __init omap_get_die_rev(void)
{
u32 die_rev;
die_rev = omap_readl(OMAP_DIE_ID_1);
/* Check for broken OMAP_DIE_ID on early 1710 */
if (((die_rev >> 12) & 0xffff) == omap_get_jtag_id())
die_rev = 0;
die_rev = (die_rev >> 17) & 0xf;
if (die_rev)
return die_rev;
die_rev = (omap_readl(OMAP32_ID_1) >> 28) & 0xf;
return die_rev;
}
void __init omap_check_revision(void)
{
int i;
u16 jtag_id;
u8 die_rev;
u32 omap_id;
u8 cpu_type;
jtag_id = omap_get_jtag_id();
die_rev = omap_get_die_rev();
omap_id = omap_readl(OMAP32_ID_0);
#ifdef DEBUG
printk(KERN_DEBUG "OMAP_DIE_ID_0: 0x%08x\n", omap_readl(OMAP_DIE_ID_0));
printk(KERN_DEBUG "OMAP_DIE_ID_1: 0x%08x DIE_REV: %i\n",
omap_readl(OMAP_DIE_ID_1),
(omap_readl(OMAP_DIE_ID_1) >> 17) & 0xf);
printk(KERN_DEBUG "OMAP_PRODUCTION_ID_0: 0x%08x\n",
omap_readl(OMAP_PRODUCTION_ID_0));
printk(KERN_DEBUG "OMAP_PRODUCTION_ID_1: 0x%08x JTAG_ID: 0x%04x\n",
omap_readl(OMAP_PRODUCTION_ID_1),
omap_readl(OMAP_PRODUCTION_ID_1) & 0xffff);
printk(KERN_DEBUG "OMAP32_ID_0: 0x%08x\n", omap_readl(OMAP32_ID_0));
printk(KERN_DEBUG "OMAP32_ID_1: 0x%08x\n", omap_readl(OMAP32_ID_1));
printk(KERN_DEBUG "JTAG_ID: 0x%04x DIE_REV: %i\n", jtag_id, die_rev);
#endif
system_serial_high = omap_readl(OMAP_DIE_ID_0);
system_serial_low = omap_readl(OMAP_DIE_ID_1);
/* First check only the major version in a safe way */
for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
if (jtag_id == (omap_ids[i].jtag_id)) {
omap_revision = omap_ids[i].type;
break;
}
}
/* Check if we can find the die revision */
for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
if (jtag_id == omap_ids[i].jtag_id && die_rev == omap_ids[i].die_rev) {
omap_revision = omap_ids[i].type;
break;
}
}
/* Finally check also the omap_id */
for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
if (jtag_id == omap_ids[i].jtag_id
&& die_rev == omap_ids[i].die_rev
&& omap_id == omap_ids[i].omap_id) {
omap_revision = omap_ids[i].type;
break;
}
}
/* Add the cpu class info (7xx, 15xx, 16xx, 24xx) */
cpu_type = omap_revision >> 24;
switch (cpu_type) {
case 0x07:
case 0x08:
omap_revision |= 0x07;
break;
case 0x03:
case 0x15:
omap_revision |= 0x15;
break;
case 0x16:
case 0x17:
omap_revision |= 0x16;
break;
default:
printk(KERN_INFO "Unknown OMAP cpu type: 0x%02x\n", cpu_type);
}
pr_info("OMAP%04x", omap_revision >> 16);
if ((omap_revision >> 8) & 0xff)
pr_cont("%x", (omap_revision >> 8) & 0xff);
pr_cont(" revision %i handled as %02xxx id: %08x%08x\n",
die_rev, omap_revision & 0xff, system_serial_low,
system_serial_high);
}
| linux-master | arch/arm/mach-omap1/id.c |
/*
* linux/arch/arm/vfp/vfpdouble.c
*
* This code is derived in part from John R. Housers softfloat library, which
* carries the following notice:
*
* ===========================================================================
* This C source file is part of the SoftFloat IEC/IEEE Floating-point
* Arithmetic Package, Release 2.
*
* Written by John R. Hauser. This work was made possible in part by the
* International Computer Science Institute, located at Suite 600, 1947 Center
* Street, Berkeley, California 94704. Funding was partially provided by the
* National Science Foundation under grant MIP-9311980. The original version
* of this code was written as part of a project to build a fixed-point vector
* processor in collaboration with the University of California at Berkeley,
* overseen by Profs. Nelson Morgan and John Wawrzynek. More information
* is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
* arithmetic/softfloat.html'.
*
* THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
* has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
* TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
* PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
* AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
*
* Derivative works are acceptable, even for commercial purposes, so long as
* (1) they include prominent notice that the work is derivative, and (2) they
* include prominent notice akin to these three paragraphs for those parts of
* this code that are retained.
* ===========================================================================
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <asm/div64.h>
#include <asm/vfp.h>
#include "vfpinstr.h"
#include "vfp.h"
static struct vfp_double vfp_double_default_qnan = {
.exponent = 2047,
.sign = 0,
.significand = VFP_DOUBLE_SIGNIFICAND_QNAN,
};
static void vfp_double_dump(const char *str, struct vfp_double *d)
{
pr_debug("VFP: %s: sign=%d exponent=%d significand=%016llx\n",
str, d->sign != 0, d->exponent, d->significand);
}
static void vfp_double_normalise_denormal(struct vfp_double *vd)
{
int bits = 31 - fls(vd->significand >> 32);
if (bits == 31)
bits = 63 - fls(vd->significand);
vfp_double_dump("normalise_denormal: in", vd);
if (bits) {
vd->exponent -= bits - 1;
vd->significand <<= bits;
}
vfp_double_dump("normalise_denormal: out", vd);
}
u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
{
u64 significand, incr;
int exponent, shift, underflow;
u32 rmode;
vfp_double_dump("pack: in", vd);
/*
* Infinities and NaNs are a special case.
*/
if (vd->exponent == 2047 && (vd->significand == 0 || exceptions))
goto pack;
/*
* Special-case zero.
*/
if (vd->significand == 0) {
vd->exponent = 0;
goto pack;
}
exponent = vd->exponent;
significand = vd->significand;
shift = 32 - fls(significand >> 32);
if (shift == 32)
shift = 64 - fls(significand);
if (shift) {
exponent -= shift;
significand <<= shift;
}
#ifdef DEBUG
vd->exponent = exponent;
vd->significand = significand;
vfp_double_dump("pack: normalised", vd);
#endif
/*
* Tiny number?
*/
underflow = exponent < 0;
if (underflow) {
significand = vfp_shiftright64jamming(significand, -exponent);
exponent = 0;
#ifdef DEBUG
vd->exponent = exponent;
vd->significand = significand;
vfp_double_dump("pack: tiny number", vd);
#endif
if (!(significand & ((1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1)))
underflow = 0;
}
/*
* Select rounding increment.
*/
incr = 0;
rmode = fpscr & FPSCR_RMODE_MASK;
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 1ULL << VFP_DOUBLE_LOW_BITS;
if ((significand & (1ULL << (VFP_DOUBLE_LOW_BITS + 1))) == 0)
incr -= 1;
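		/*
		 * Round-to-nearest-even sketch: incr is half an ULP of the
		 * final result; when the result LSB (bit
		 * VFP_DOUBLE_LOW_BITS + 1) is already 0, incr is reduced by
		 * 1 so a value exactly half way (round bit set, sticky bits
		 * clear) does not carry up, i.e. ties round towards the
		 * even significand.
		 */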
} else if (rmode == FPSCR_ROUND_TOZERO) {
incr = 0;
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vd->sign != 0))
incr = (1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1;
pr_debug("VFP: rounding increment = 0x%08llx\n", incr);
/*
* Is our rounding going to overflow?
*/
if ((significand + incr) < significand) {
exponent += 1;
significand = (significand >> 1) | (significand & 1);
incr >>= 1;
#ifdef DEBUG
vd->exponent = exponent;
vd->significand = significand;
vfp_double_dump("pack: overflow", vd);
#endif
}
/*
* If any of the low bits (which will be shifted out of the
* number) are non-zero, the result is inexact.
*/
if (significand & ((1 << (VFP_DOUBLE_LOW_BITS + 1)) - 1))
exceptions |= FPSCR_IXC;
/*
* Do our rounding.
*/
significand += incr;
/*
* Infinity?
*/
if (exponent >= 2046) {
exceptions |= FPSCR_OFC | FPSCR_IXC;
if (incr == 0) {
vd->exponent = 2045;
vd->significand = 0x7fffffffffffffffULL;
} else {
vd->exponent = 2047; /* infinity */
vd->significand = 0;
}
} else {
if (significand >> (VFP_DOUBLE_LOW_BITS + 1) == 0)
exponent = 0;
if (exponent || significand > 0x8000000000000000ULL)
underflow = 0;
if (underflow)
exceptions |= FPSCR_UFC;
vd->exponent = exponent;
vd->significand = significand >> 1;
}
pack:
vfp_double_dump("pack: final", vd);
{
s64 d = vfp_double_pack(vd);
pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func,
dd, d, exceptions);
vfp_put_double(d, dd);
}
return exceptions;
}
/*
* Propagate the NaN, setting exceptions if it is signalling.
* 'n' is always a NaN. 'm' may be a number, NaN or infinity.
*/
static u32
vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
struct vfp_double *vdm, u32 fpscr)
{
struct vfp_double *nan;
int tn, tm = 0;
tn = vfp_double_type(vdn);
if (vdm)
tm = vfp_double_type(vdm);
if (fpscr & FPSCR_DEFAULT_NAN)
/*
* Default NaN mode - always returns a quiet NaN
*/
nan = &vfp_double_default_qnan;
else {
/*
* Contemporary mode - select the first signalling
* NAN, or if neither are signalling, the first
* quiet NAN.
*/
if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN))
nan = vdn;
else
nan = vdm;
/*
* Make the NaN quiet.
*/
nan->significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
}
*vdd = *nan;
/*
* If one was a signalling NAN, raise invalid operation.
*/
return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : VFP_NAN_FLAG;
}
/*
* Extended operations
*/
static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr)
{
vfp_put_double(vfp_double_packed_abs(vfp_get_double(dm)), dd);
return 0;
}
static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr)
{
vfp_put_double(vfp_get_double(dm), dd);
return 0;
}
static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr)
{
vfp_put_double(vfp_double_packed_negate(vfp_get_double(dm)), dd);
return 0;
}
static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm, vdd;
int ret, tm;
vfp_double_unpack(&vdm, vfp_get_double(dm));
tm = vfp_double_type(&vdm);
if (tm & (VFP_NAN|VFP_INFINITY)) {
struct vfp_double *vdp = &vdd;
if (tm & VFP_NAN)
ret = vfp_propagate_nan(vdp, &vdm, NULL, fpscr);
else if (vdm.sign == 0) {
sqrt_copy:
vdp = &vdm;
ret = 0;
} else {
sqrt_invalid:
vdp = &vfp_double_default_qnan;
ret = FPSCR_IOC;
}
vfp_put_double(vfp_double_pack(vdp), dd);
return ret;
}
/*
* sqrt(+/- 0) == +/- 0
*/
if (tm & VFP_ZERO)
goto sqrt_copy;
/*
* Normalise a denormalised number
*/
if (tm & VFP_DENORMAL)
vfp_double_normalise_denormal(&vdm);
/*
* sqrt(<0) = invalid
*/
if (vdm.sign)
goto sqrt_invalid;
vfp_double_dump("sqrt", &vdm);
/*
* Estimate the square root.
*/
vdd.sign = 0;
vdd.exponent = ((vdm.exponent - 1023) >> 1) + 1023;
vdd.significand = (u64)vfp_estimate_sqrt_significand(vdm.exponent, vdm.significand >> 32) << 31;
vfp_double_dump("sqrt estimate1", &vdd);
vdm.significand >>= 1 + (vdm.exponent & 1);
vdd.significand += 2 + vfp_estimate_div128to64(vdm.significand, 0, vdd.significand);
vfp_double_dump("sqrt estimate2", &vdd);
/*
* And now adjust.
*/
if ((vdd.significand & VFP_DOUBLE_LOW_BITS_MASK) <= 5) {
if (vdd.significand < 2) {
vdd.significand = ~0ULL;
} else {
u64 termh, terml, remh, reml;
vdm.significand <<= 2;
mul64to128(&termh, &terml, vdd.significand, vdd.significand);
sub128(&remh, &reml, vdm.significand, 0, termh, terml);
while ((s64)remh < 0) {
vdd.significand -= 1;
shift64left(&termh, &terml, vdd.significand);
terml |= 1;
add128(&remh, &reml, remh, reml, termh, terml);
}
vdd.significand |= (remh | reml) != 0;
}
}
vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fsqrt");
}
/*
* Equal := ZC
* Less than := N
* Greater than := C
* Unordered := CV
*/
static u32 vfp_compare(int dd, int signal_on_qnan, int dm, u32 fpscr)
{
s64 d, m;
u32 ret = 0;
m = vfp_get_double(dm);
if (vfp_double_packed_exponent(m) == 2047 && vfp_double_packed_mantissa(m)) {
ret |= FPSCR_C | FPSCR_V;
if (signal_on_qnan || !(vfp_double_packed_mantissa(m) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1))))
/*
* Signalling NaN, or signalling on quiet NaN
*/
ret |= FPSCR_IOC;
}
d = vfp_get_double(dd);
if (vfp_double_packed_exponent(d) == 2047 && vfp_double_packed_mantissa(d)) {
ret |= FPSCR_C | FPSCR_V;
if (signal_on_qnan || !(vfp_double_packed_mantissa(d) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1))))
/*
* Signalling NaN, or signalling on quiet NaN
*/
ret |= FPSCR_IOC;
}
if (ret == 0) {
if (d == m || vfp_double_packed_abs(d | m) == 0) {
/*
* equal
*/
ret |= FPSCR_Z | FPSCR_C;
} else if (vfp_double_packed_sign(d ^ m)) {
/*
* different signs
*/
if (vfp_double_packed_sign(d))
/*
* d is negative, so d < m
*/
ret |= FPSCR_N;
else
/*
* d is positive, so d > m
*/
ret |= FPSCR_C;
} else if ((vfp_double_packed_sign(d) != 0) ^ (d < m)) {
/*
* d < m
*/
ret |= FPSCR_N;
} else if ((vfp_double_packed_sign(d) != 0) ^ (d > m)) {
/*
* d > m
*/
ret |= FPSCR_C;
}
}
return ret;
}
static u32 vfp_double_fcmp(int dd, int unused, int dm, u32 fpscr)
{
return vfp_compare(dd, 0, dm, fpscr);
}
static u32 vfp_double_fcmpe(int dd, int unused, int dm, u32 fpscr)
{
return vfp_compare(dd, 1, dm, fpscr);
}
static u32 vfp_double_fcmpz(int dd, int unused, int dm, u32 fpscr)
{
return vfp_compare(dd, 0, VFP_REG_ZERO, fpscr);
}
static u32 vfp_double_fcmpez(int dd, int unused, int dm, u32 fpscr)
{
return vfp_compare(dd, 1, VFP_REG_ZERO, fpscr);
}
static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm;
struct vfp_single vsd;
int tm;
u32 exceptions = 0;
vfp_double_unpack(&vdm, vfp_get_double(dm));
tm = vfp_double_type(&vdm);
/*
* If we have a signalling NaN, signal invalid operation.
*/
if (tm == VFP_SNAN)
exceptions = FPSCR_IOC;
if (tm & VFP_DENORMAL)
vfp_double_normalise_denormal(&vdm);
vsd.sign = vdm.sign;
vsd.significand = vfp_hi64to32jamming(vdm.significand);
/*
* If we have an infinity or a NaN, the exponent must be 255
*/
if (tm & (VFP_INFINITY|VFP_NAN)) {
vsd.exponent = 255;
if (tm == VFP_QNAN)
vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
goto pack_nan;
} else if (tm & VFP_ZERO)
vsd.exponent = 0;
else
vsd.exponent = vdm.exponent - (1023 - 127);
return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fcvts");
pack_nan:
vfp_put_float(vfp_single_pack(&vsd), sd);
return exceptions;
}
static u32 vfp_double_fuito(int dd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm;
u32 m = vfp_get_float(dm);
vdm.sign = 0;
vdm.exponent = 1023 + 63 - 1;
vdm.significand = (u64)m;
return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fuito");
}
static u32 vfp_double_fsito(int dd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm;
u32 m = vfp_get_float(dm);
vdm.sign = (m & 0x80000000) >> 16;
vdm.exponent = 1023 + 63 - 1;
vdm.significand = vdm.sign ? -m : m;
return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fsito");
}
static u32 vfp_double_ftoui(int sd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm;
u32 d, exceptions = 0;
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
vfp_double_unpack(&vdm, vfp_get_double(dm));
/*
* Do we have a denormalised number?
*/
tm = vfp_double_type(&vdm);
if (tm & VFP_DENORMAL)
exceptions |= FPSCR_IDC;
if (tm & VFP_NAN)
vdm.sign = 0;
if (vdm.exponent >= 1023 + 32) {
d = vdm.sign ? 0 : 0xffffffff;
exceptions = FPSCR_IOC;
} else if (vdm.exponent >= 1023 - 1) {
int shift = 1023 + 63 - vdm.exponent;
u64 rem, incr = 0;
/*
* 2^0 <= m < 2^32-2^8
*/
d = (vdm.significand << 1) >> shift;
rem = vdm.significand << (65 - shift);
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 0x8000000000000000ULL;
if ((d & 1) == 0)
incr -= 1;
} else if (rmode == FPSCR_ROUND_TOZERO) {
incr = 0;
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) {
incr = ~0ULL;
}
if ((rem + incr) < rem) {
if (d < 0xffffffff)
d += 1;
else
exceptions |= FPSCR_IOC;
}
if (d && vdm.sign) {
d = 0;
exceptions |= FPSCR_IOC;
} else if (rem)
exceptions |= FPSCR_IXC;
} else {
d = 0;
if (vdm.exponent | vdm.significand) {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) {
d = 0;
exceptions |= FPSCR_IOC;
}
}
}
pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float(d, sd);
return exceptions;
}
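/*
 * Illustrative sketch (not part of the build): the round-to-nearest-
 * even increment used above. 'rem' holds the discarded fraction
 * scaled so that 1ULL << 63 is exactly one half; biasing the
 * increment down by one when the result is already even makes a
 * half-way remainder round to the even neighbour.
 */
#if 0
static u32 round_nearest_even(u32 d, u64 rem)
{
	u64 incr = 0x8000000000000000ULL;

	if ((d & 1) == 0)
		incr -= 1;		/* ties round to even */
	if (rem + incr < rem)		/* carry out => round up */
		d += 1;
	return d;
}
#endif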
static u32 vfp_double_ftouiz(int sd, int unused, int dm, u32 fpscr)
{
return vfp_double_ftoui(sd, unused, dm, FPSCR_ROUND_TOZERO);
}
static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr)
{
struct vfp_double vdm;
u32 d, exceptions = 0;
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
vfp_double_unpack(&vdm, vfp_get_double(dm));
vfp_double_dump("VDM", &vdm);
/*
	 * Do we have a denormalised number?
*/
tm = vfp_double_type(&vdm);
if (tm & VFP_DENORMAL)
exceptions |= FPSCR_IDC;
if (tm & VFP_NAN) {
d = 0;
exceptions |= FPSCR_IOC;
} else if (vdm.exponent >= 1023 + 32) {
d = 0x7fffffff;
if (vdm.sign)
d = ~d;
exceptions |= FPSCR_IOC;
} else if (vdm.exponent >= 1023 - 1) {
int shift = 1023 + 63 - vdm.exponent; /* 58 */
u64 rem, incr = 0;
d = (vdm.significand << 1) >> shift;
rem = vdm.significand << (65 - shift);
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 0x8000000000000000ULL;
if ((d & 1) == 0)
incr -= 1;
} else if (rmode == FPSCR_ROUND_TOZERO) {
incr = 0;
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) {
incr = ~0ULL;
}
if ((rem + incr) < rem && d < 0xffffffff)
d += 1;
if (d > 0x7fffffff + (vdm.sign != 0)) {
d = 0x7fffffff + (vdm.sign != 0);
exceptions |= FPSCR_IOC;
} else if (rem)
exceptions |= FPSCR_IXC;
if (vdm.sign)
d = -d;
} else {
d = 0;
if (vdm.exponent | vdm.significand) {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign)
d = -1;
}
}
pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float((s32)d, sd);
return exceptions;
}
static u32 vfp_double_ftosiz(int dd, int unused, int dm, u32 fpscr)
{
return vfp_double_ftosi(dd, unused, dm, FPSCR_ROUND_TOZERO);
}
static struct op fops_ext[32] = {
[FEXT_TO_IDX(FEXT_FCPY)] = { vfp_double_fcpy, 0 },
[FEXT_TO_IDX(FEXT_FABS)] = { vfp_double_fabs, 0 },
[FEXT_TO_IDX(FEXT_FNEG)] = { vfp_double_fneg, 0 },
[FEXT_TO_IDX(FEXT_FSQRT)] = { vfp_double_fsqrt, 0 },
[FEXT_TO_IDX(FEXT_FCMP)] = { vfp_double_fcmp, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCMPE)] = { vfp_double_fcmpe, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCMPZ)] = { vfp_double_fcmpz, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCMPEZ)] = { vfp_double_fcmpez, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCVT)] = { vfp_double_fcvts, OP_SCALAR|OP_SD },
[FEXT_TO_IDX(FEXT_FUITO)] = { vfp_double_fuito, OP_SCALAR|OP_SM },
[FEXT_TO_IDX(FEXT_FSITO)] = { vfp_double_fsito, OP_SCALAR|OP_SM },
[FEXT_TO_IDX(FEXT_FTOUI)] = { vfp_double_ftoui, OP_SCALAR|OP_SD },
[FEXT_TO_IDX(FEXT_FTOUIZ)] = { vfp_double_ftouiz, OP_SCALAR|OP_SD },
[FEXT_TO_IDX(FEXT_FTOSI)] = { vfp_double_ftosi, OP_SCALAR|OP_SD },
[FEXT_TO_IDX(FEXT_FTOSIZ)] = { vfp_double_ftosiz, OP_SCALAR|OP_SD },
};
static u32
vfp_double_fadd_nonnumber(struct vfp_double *vdd, struct vfp_double *vdn,
struct vfp_double *vdm, u32 fpscr)
{
struct vfp_double *vdp;
u32 exceptions = 0;
int tn, tm;
tn = vfp_double_type(vdn);
tm = vfp_double_type(vdm);
if (tn & tm & VFP_INFINITY) {
/*
* Two infinities. Are they different signs?
*/
if (vdn->sign ^ vdm->sign) {
/*
* different signs -> invalid
*/
exceptions = FPSCR_IOC;
vdp = &vfp_double_default_qnan;
} else {
/*
* same signs -> valid
*/
vdp = vdn;
}
} else if (tn & VFP_INFINITY && tm & VFP_NUMBER) {
/*
* One infinity and one number -> infinity
*/
vdp = vdn;
} else {
/*
* 'n' is a NaN of some type
*/
return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
}
*vdd = *vdp;
return exceptions;
}
static u32
vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn,
struct vfp_double *vdm, u32 fpscr)
{
u32 exp_diff;
u64 m_sig;
if (vdn->significand & (1ULL << 63) ||
vdm->significand & (1ULL << 63)) {
pr_info("VFP: bad FP values in %s\n", __func__);
vfp_double_dump("VDN", vdn);
vfp_double_dump("VDM", vdm);
}
/*
* Ensure that 'n' is the largest magnitude number. Note that
* if 'n' and 'm' have equal exponents, we do not swap them.
* This ensures that NaN propagation works correctly.
*/
if (vdn->exponent < vdm->exponent) {
struct vfp_double *t = vdn;
vdn = vdm;
vdm = t;
}
/*
* Is 'n' an infinity or a NaN? Note that 'm' may be a number,
* infinity or a NaN here.
*/
if (vdn->exponent == 2047)
return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr);
/*
* We have two proper numbers, where 'vdn' is the larger magnitude.
*
* Copy 'n' to 'd' before doing the arithmetic.
*/
*vdd = *vdn;
/*
* Align 'm' with the result.
*/
exp_diff = vdn->exponent - vdm->exponent;
m_sig = vfp_shiftright64jamming(vdm->significand, exp_diff);
/*
* If the signs are different, we are really subtracting.
*/
if (vdn->sign ^ vdm->sign) {
m_sig = vdn->significand - m_sig;
if ((s64)m_sig < 0) {
vdd->sign = vfp_sign_negate(vdd->sign);
m_sig = -m_sig;
} else if (m_sig == 0) {
vdd->sign = (fpscr & FPSCR_RMODE_MASK) ==
FPSCR_ROUND_MINUSINF ? 0x8000 : 0;
}
} else {
m_sig += vdn->significand;
}
vdd->significand = m_sig;
return 0;
}
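/*
 * Illustrative sketch (not part of the build): the "jamming" right
 * shift performed by vfp_shiftright64jamming() above. Any bits
 * shifted out are ORed back into the LSB so the sticky information
 * needed for correct rounding is never lost.
 */
#if 0
static u64 shiftright64jamming_sketch(u64 val, unsigned int shift)
{
	if (shift == 0)
		return val;
	if (shift >= 64)
		return val != 0;
	return (val >> shift) | ((val << (64 - shift)) != 0);
}
#endif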
static u32
vfp_double_multiply(struct vfp_double *vdd, struct vfp_double *vdn,
struct vfp_double *vdm, u32 fpscr)
{
vfp_double_dump("VDN", vdn);
vfp_double_dump("VDM", vdm);
/*
* Ensure that 'n' is the largest magnitude number. Note that
* if 'n' and 'm' have equal exponents, we do not swap them.
* This ensures that NaN propagation works correctly.
*/
if (vdn->exponent < vdm->exponent) {
struct vfp_double *t = vdn;
vdn = vdm;
vdm = t;
pr_debug("VFP: swapping M <-> N\n");
}
vdd->sign = vdn->sign ^ vdm->sign;
/*
* If 'n' is an infinity or NaN, handle it. 'm' may be anything.
*/
if (vdn->exponent == 2047) {
if (vdn->significand || (vdm->exponent == 2047 && vdm->significand))
return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
if ((vdm->exponent | vdm->significand) == 0) {
*vdd = vfp_double_default_qnan;
return FPSCR_IOC;
}
vdd->exponent = vdn->exponent;
vdd->significand = 0;
return 0;
}
/*
* If 'm' is zero, the result is always zero. In this case,
* 'n' may be zero or a number, but it doesn't matter which.
*/
if ((vdm->exponent | vdm->significand) == 0) {
vdd->exponent = 0;
vdd->significand = 0;
return 0;
}
/*
* We add 2 to the destination exponent for the same reason
* as the addition case - though this time we have +1 from
* each input operand.
*/
vdd->exponent = vdn->exponent + vdm->exponent - 1023 + 2;
vdd->significand = vfp_hi64multiply64(vdn->significand, vdm->significand);
vfp_double_dump("VDD", vdd);
return 0;
}
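/*
 * Worked example (illustrative): each unpacked operand keeps its
 * leading one one bit below full 64-bit alignment, so the high half
 * of the 128-bit product returned by vfp_hi64multiply64() sits two
 * bits low in total; the '+2' exponent bias above pays that back,
 * and the discarded low half is jammed into the LSB as a sticky bit.
 */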
#define NEG_MULTIPLY (1 << 0)
#define NEG_SUBTRACT (1 << 1)
static u32
vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, char *func)
{
struct vfp_double vdd, vdp, vdn, vdm;
u32 exceptions;
vfp_double_unpack(&vdn, vfp_get_double(dn));
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(dm));
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
if (negate & NEG_MULTIPLY)
vdp.sign = vfp_sign_negate(vdp.sign);
vfp_double_unpack(&vdn, vfp_get_double(dd));
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
if (negate & NEG_SUBTRACT)
vdn.sign = vfp_sign_negate(vdn.sign);
exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, func);
}
/*
* Standard operations
*/
/*
 * dd = dd + (dn * dm)
*/
static u32 vfp_double_fmac(int dd, int dn, int dm, u32 fpscr)
{
return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, 0, "fmac");
}
/*
 * dd = dd - (dn * dm)
*/
static u32 vfp_double_fnmac(int dd, int dn, int dm, u32 fpscr)
{
return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac");
}
/*
 * dd = -dd + (dn * dm)
*/
static u32 vfp_double_fmsc(int dd, int dn, int dm, u32 fpscr)
{
return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc");
}
/*
 * dd = -dd - (dn * dm)
*/
static u32 vfp_double_fnmsc(int dd, int dn, int dm, u32 fpscr)
{
return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
}
/*
 * dd = dn * dm
*/
static u32 vfp_double_fmul(int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
vfp_double_unpack(&vdn, vfp_get_double(dn));
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(dm));
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fmul");
}
/*
 * dd = -(dn * dm)
*/
static u32 vfp_double_fnmul(int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
vfp_double_unpack(&vdn, vfp_get_double(dn));
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(dm));
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
vdd.sign = vfp_sign_negate(vdd.sign);
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fnmul");
}
/*
 * dd = dn + dm
*/
static u32 vfp_double_fadd(int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
vfp_double_unpack(&vdn, vfp_get_double(dn));
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(dm));
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fadd");
}
/*
 * dd = dn - dm
*/
static u32 vfp_double_fsub(int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions;
vfp_double_unpack(&vdn, vfp_get_double(dn));
if (vdn.exponent == 0 && vdn.significand)
vfp_double_normalise_denormal(&vdn);
vfp_double_unpack(&vdm, vfp_get_double(dm));
if (vdm.exponent == 0 && vdm.significand)
vfp_double_normalise_denormal(&vdm);
/*
* Subtraction is like addition, but with a negated operand.
*/
vdm.sign = vfp_sign_negate(vdm.sign);
exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fsub");
}
/*
 * dd = dn / dm
*/
static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr)
{
struct vfp_double vdd, vdn, vdm;
u32 exceptions = 0;
int tm, tn;
vfp_double_unpack(&vdn, vfp_get_double(dn));
vfp_double_unpack(&vdm, vfp_get_double(dm));
vdd.sign = vdn.sign ^ vdm.sign;
tn = vfp_double_type(&vdn);
tm = vfp_double_type(&vdm);
/*
* Is n a NAN?
*/
if (tn & VFP_NAN)
goto vdn_nan;
/*
* Is m a NAN?
*/
if (tm & VFP_NAN)
goto vdm_nan;
/*
* If n and m are infinity, the result is invalid
* If n and m are zero, the result is invalid
*/
if (tm & tn & (VFP_INFINITY|VFP_ZERO))
goto invalid;
/*
* If n is infinity, the result is infinity
*/
if (tn & VFP_INFINITY)
goto infinity;
/*
* If m is zero, raise div0 exceptions
*/
if (tm & VFP_ZERO)
goto divzero;
/*
* If m is infinity, or n is zero, the result is zero
*/
if (tm & VFP_INFINITY || tn & VFP_ZERO)
goto zero;
if (tn & VFP_DENORMAL)
vfp_double_normalise_denormal(&vdn);
if (tm & VFP_DENORMAL)
vfp_double_normalise_denormal(&vdm);
/*
* Ok, we have two numbers, we can perform division.
*/
vdd.exponent = vdn.exponent - vdm.exponent + 1023 - 1;
vdm.significand <<= 1;
if (vdm.significand <= (2 * vdn.significand)) {
vdn.significand >>= 1;
vdd.exponent++;
}
vdd.significand = vfp_estimate_div128to64(vdn.significand, 0, vdm.significand);
if ((vdd.significand & 0x1ff) <= 2) {
u64 termh, terml, remh, reml;
mul64to128(&termh, &terml, vdm.significand, vdd.significand);
sub128(&remh, &reml, vdn.significand, 0, termh, terml);
while ((s64)remh < 0) {
vdd.significand -= 1;
add128(&remh, &reml, remh, reml, 0, vdm.significand);
}
vdd.significand |= (reml != 0);
}
return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fdiv");
vdn_nan:
exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
pack:
vfp_put_double(vfp_double_pack(&vdd), dd);
return exceptions;
vdm_nan:
exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
goto pack;
zero:
vdd.exponent = 0;
vdd.significand = 0;
goto pack;
divzero:
exceptions = FPSCR_DZC;
infinity:
vdd.exponent = 2047;
vdd.significand = 0;
goto pack;
invalid:
vfp_put_double(vfp_double_pack(&vfp_double_default_qnan), dd);
return FPSCR_IOC;
}
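/*
 * Illustrative sketch (not part of the build): the estimate-and-
 * correct division used above, reduced to narrow integers. The
 * initial quotient estimate may be slightly high; multiplying it
 * back out and stepping down while the remainder is negative yields
 * the exact truncated quotient, with any non-zero remainder jammed
 * into the LSB as the sticky bit.
 */
#if 0
static u32 div_estimate_correct(u32 n, u32 m)
{
	u32 q = n / m + 1;		/* deliberately biased estimate */
	s64 rem = (s64)n - (s64)q * m;

	while (rem < 0) {
		q -= 1;
		rem += m;
	}
	return q | (rem != 0);		/* sticky bit */
}
#endif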
static struct op fops[16] = {
[FOP_TO_IDX(FOP_FMAC)] = { vfp_double_fmac, 0 },
[FOP_TO_IDX(FOP_FNMAC)] = { vfp_double_fnmac, 0 },
[FOP_TO_IDX(FOP_FMSC)] = { vfp_double_fmsc, 0 },
[FOP_TO_IDX(FOP_FNMSC)] = { vfp_double_fnmsc, 0 },
[FOP_TO_IDX(FOP_FMUL)] = { vfp_double_fmul, 0 },
[FOP_TO_IDX(FOP_FNMUL)] = { vfp_double_fnmul, 0 },
[FOP_TO_IDX(FOP_FADD)] = { vfp_double_fadd, 0 },
[FOP_TO_IDX(FOP_FSUB)] = { vfp_double_fsub, 0 },
[FOP_TO_IDX(FOP_FDIV)] = { vfp_double_fdiv, 0 },
};
#define FREG_BANK(x) ((x) & 0x0c)
#define FREG_IDX(x) ((x) & 3)
u32 vfp_double_cpdo(u32 inst, u32 fpscr)
{
u32 op = inst & FOP_MASK;
u32 exceptions = 0;
unsigned int dest;
unsigned int dn = vfp_get_dn(inst);
unsigned int dm;
unsigned int vecitr, veclen, vecstride;
struct op *fop;
vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK));
fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
/*
 * fcvtsd takes an sN register number as destination, not dN.
* It also always operates on scalars.
*/
if (fop->flags & OP_SD)
dest = vfp_get_sd(inst);
else
dest = vfp_get_dd(inst);
/*
 * f[us]ito takes an sN operand, not a dN operand.
*/
if (fop->flags & OP_SM)
dm = vfp_get_sm(inst);
else
dm = vfp_get_dm(inst);
/*
* If destination bank is zero, vector length is always '1'.
* ARM DDI0100F C5.1.3, C5.3.2.
*/
if ((fop->flags & OP_SCALAR) || (FREG_BANK(dest) == 0))
veclen = 0;
else
veclen = fpscr & FPSCR_LENGTH_MASK;
pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
(veclen >> FPSCR_LENGTH_BIT) + 1);
if (!fop->fn)
goto invalid;
for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
u32 except;
char type;
type = fop->flags & OP_SD ? 's' : 'd';
if (op == FOP_EXT)
pr_debug("VFP: itr%d (%c%u) = op[%u] (d%u)\n",
vecitr >> FPSCR_LENGTH_BIT,
type, dest, dn, dm);
else
pr_debug("VFP: itr%d (%c%u) = (d%u) op[%u] (d%u)\n",
vecitr >> FPSCR_LENGTH_BIT,
type, dest, dn, FOP_TO_IDX(op), dm);
except = fop->fn(dest, dn, dm, fpscr);
pr_debug("VFP: itr%d: exceptions=%08x\n",
vecitr >> FPSCR_LENGTH_BIT, except);
exceptions |= except;
/*
* CHECK: It appears to be undefined whether we stop when
* we encounter an exception. We continue.
*/
dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 3);
dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 3);
if (FREG_BANK(dm) != 0)
dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 3);
}
return exceptions;
invalid:
return ~0;
}
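/*
 * Illustrative sketch (not part of the build): the vector register
 * stepping used in the loop above. Registers advance by 'vecstride'
 * within their four-register bank and wrap inside that bank
 * (ARM DDI0100F C5.1.3), so e.g. d7 stepped by one becomes d4,
 * not d8.
 */
#if 0
static unsigned int vfp_step_reg(unsigned int reg, unsigned int stride)
{
	return FREG_BANK(reg) + ((FREG_IDX(reg) + stride) & 3);
}
#endif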
| linux-master | arch/arm/vfp/vfpdouble.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/vfp/vfpmodule.c
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
*/
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/traps.h>
#include <asm/vfp.h>
#include <asm/neon.h>
#include "vfpinstr.h"
#include "vfp.h"
static bool have_vfp __ro_after_init;
/*
* Dual-use variable.
 * Used during startup: set to non-zero if the VFP checks fail.
 * After startup, holds the VFP architecture version.
*/
static unsigned int VFP_arch;
#ifdef CONFIG_CPU_FEROCEON
extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
#endif
/*
* The pointer to the vfpstate structure of the thread which currently
* owns the context held in the VFP hardware, or NULL if the hardware
* context is invalid.
*
* For UP, this is sufficient to tell which thread owns the VFP context.
* However, for SMP, we also need to check the CPU number stored in the
* saved state too to catch migrations.
*/
union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
 * Is the thread's most up-to-date state stored in this CPU's hardware?
* Must be called from non-preemptible context.
*/
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
if (thread->vfpstate.hard.cpu != cpu)
return false;
#endif
return vfp_current_hw_state[cpu] == &thread->vfpstate;
}
/*
* Force a reload of the VFP context from the thread structure. We do
* this by ensuring that access to the VFP hardware is disabled, and
* clear vfp_current_hw_state. Must be called from non-preemptible context.
*/
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
if (vfp_state_in_hw(cpu, thread)) {
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
vfp_current_hw_state[cpu] = NULL;
}
#ifdef CONFIG_SMP
thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
/*
* Per-thread VFP initialization.
*/
static void vfp_thread_flush(struct thread_info *thread)
{
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu;
/*
* Disable VFP to ensure we initialize it first. We must ensure
* that the modification of vfp_current_hw_state[] and hardware
* disable are done for the same CPU and without preemption.
*
* Do this first to ensure that preemption won't overwrite our
* state saving should access to the VFP be enabled at this point.
*/
cpu = get_cpu();
if (vfp_current_hw_state[cpu] == vfp)
vfp_current_hw_state[cpu] = NULL;
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
memset(vfp, 0, sizeof(union vfp_state));
vfp->hard.fpexc = FPEXC_EN;
vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
vfp->hard.cpu = NR_CPUS;
#endif
}
static void vfp_thread_exit(struct thread_info *thread)
{
/* release case: Per-thread VFP cleanup. */
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu = get_cpu();
if (vfp_current_hw_state[cpu] == vfp)
vfp_current_hw_state[cpu] = NULL;
put_cpu();
}
static void vfp_thread_copy(struct thread_info *thread)
{
struct thread_info *parent = current_thread_info();
vfp_sync_hwstate(parent);
thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
/*
* When this function is called with the following 'cmd's, the following
* is true while this function is being run:
 * THREAD_NOTIFY_SWITCH:
* - the previously running thread will not be scheduled onto another CPU.
* - the next thread to be run (v) will not be running on another CPU.
* - thread->cpu is the local CPU number
* - not preemptible as we're called in the middle of a thread switch
* THREAD_NOTIFY_FLUSH:
* - the thread (v) will be running on the local CPU, so
* v === current_thread_info()
* - thread->cpu is the local CPU number at the time it is accessed,
* but may change at any time.
* - we could be preempted if tree preempt rcu is enabled, so
* it is unsafe to use thread->cpu.
* THREAD_NOTIFY_EXIT
* - we could be preempted if tree preempt rcu is enabled, so
* it is unsafe to use thread->cpu.
*/
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
struct thread_info *thread = v;
u32 fpexc;
#ifdef CONFIG_SMP
unsigned int cpu;
#endif
switch (cmd) {
case THREAD_NOTIFY_SWITCH:
fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
/*
* Always disable VFP so we can lazily save/restore the
* old state.
*/
fmxr(FPEXC, fpexc & ~FPEXC_EN);
break;
case THREAD_NOTIFY_FLUSH:
vfp_thread_flush(thread);
break;
case THREAD_NOTIFY_EXIT:
vfp_thread_exit(thread);
break;
case THREAD_NOTIFY_COPY:
vfp_thread_copy(thread);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block vfp_notifier_block = {
.notifier_call = vfp_notifier,
};
/*
* Raise a SIGFPE for the current process.
* sicode describes the signal being raised.
*/
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
/*
* This is the same as NWFPE, because it's not clear what
	 * this is used for.
*/
current->thread.error_code = 0;
current->thread.trap_no = 6;
send_sig_fault(SIGFPE, sicode,
(void __user *)(instruction_pointer(regs) - 4),
current);
}
static void vfp_panic(char *reason, u32 inst)
{
int i;
pr_err("VFP: Error: %s\n", reason);
pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
fmrx(FPEXC), fmrx(FPSCR), inst);
for (i = 0; i < 32; i += 2)
pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
/*
* Process bitmask of exception conditions.
*/
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
int si_code = 0;
pr_debug("VFP: raising exceptions %08x\n", exceptions);
if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
vfp_raise_sigfpe(FPE_FLTINV, regs);
return;
}
/*
* If any of the status flags are set, update the FPSCR.
* Comparison instructions always return at least one of
* these flags set.
*/
if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
fpscr |= exceptions;
fmxr(FPSCR, fpscr);
#define RAISE(stat,en,sig) \
if (exceptions & stat && fpscr & en) \
si_code = sig;
/*
* These are arranged in priority order, least to highest.
*/
RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
if (si_code)
vfp_raise_sigfpe(si_code, regs);
}
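/*
 * Worked example (illustrative): if a bounced instruction reports
 * both FPSCR_IXC and FPSCR_IOC with both trap enables set, the
 * RAISE() chain above leaves si_code == FPE_FLTINV, because the
 * invalid-operation test runs last and therefore wins as the
 * highest-priority signal.
 */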
/*
* Emulate a VFP instruction.
*/
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
u32 exceptions = VFP_EXCEPTION_ERROR;
pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);
if (INST_CPRTDO(inst)) {
if (!INST_CPRT(inst)) {
/*
* CPDO
*/
if (vfp_single(inst)) {
exceptions = vfp_single_cpdo(inst, fpscr);
} else {
exceptions = vfp_double_cpdo(inst, fpscr);
}
} else {
/*
* A CPRT instruction can not appear in FPINST2, nor
* can it cause an exception. Therefore, we do not
* have to emulate it.
*/
}
} else {
/*
* A CPDT instruction can not appear in FPINST2, nor can
* it cause an exception. Therefore, we do not have to
* emulate it.
*/
}
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
return exceptions & ~VFP_NAN_FLAG;
}
/*
* Package up a bounce condition.
*/
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
u32 fpscr, orig_fpscr, fpsid, exceptions;
pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
/*
* At this point, FPEXC can have the following configuration:
*
* EX DEX IXE
* 0 1 x - synchronous exception
* 1 x 0 - asynchronous exception
 * 1 x 1 - synchronous on VFP subarch 1 and asynchronous on later
* 0 0 1 - synchronous on VFP9 (non-standard subarch 1
* implementation), undefined otherwise
*
* Clear various bits and enable access to the VFP so we can
* handle the bounce.
*/
fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));
fpsid = fmrx(FPSID);
orig_fpscr = fpscr = fmrx(FPSCR);
/*
* Check for the special VFP subarch 1 and FPSCR.IXE bit case
*/
if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
&& (fpscr & FPSCR_IXE)) {
/*
* Synchronous exception, emulate the trigger instruction
*/
goto emulate;
}
if (fpexc & FPEXC_EX) {
/*
* Asynchronous exception. The instruction is read from FPINST
* and the interrupted instruction has to be restarted.
*/
trigger = fmrx(FPINST);
regs->ARM_pc -= 4;
} else if (!(fpexc & FPEXC_DEX)) {
/*
* Illegal combination of bits. It can be caused by an
* unallocated VFP instruction but with FPSCR.IXE set and not
* on VFP subarch 1.
*/
vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
return;
}
/*
* Modify fpscr to indicate the number of iterations remaining.
* If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
* whether FPEXC.VECITR or FPSCR.LEN is used.
*/
if (fpexc & (FPEXC_EX | FPEXC_VV)) {
u32 len;
len = fpexc + (1 << FPEXC_LENGTH_BIT);
fpscr &= ~FPSCR_LENGTH_MASK;
fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
}
/*
* Handle the first FP instruction. We used to take note of the
* FPEXC bounce reason, but this appears to be unreliable.
* Emulate the bounced instruction instead.
*/
exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
if (exceptions)
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
/*
* If there isn't a second FP instruction, exit now. Note that
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
return;
/*
* The barrier() here prevents fpinst2 being read
* before the condition above.
*/
barrier();
trigger = fmrx(FPINST2);
emulate:
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}
static void vfp_enable(void *unused)
{
u32 access;
BUG_ON(preemptible());
access = get_copro_access();
/*
* Enable full access to VFP (cp10 and cp11)
*/
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
/* Called by platforms on which we want to disable VFP because it may not be
 * present on all CPUs within an SMP complex. Needs to be called prior to
* vfp_init().
*/
void __init vfp_disable(void)
{
if (VFP_arch) {
pr_debug("%s: should be called prior to vfp_init\n", __func__);
return;
}
VFP_arch = 1;
}
#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
struct thread_info *ti = current_thread_info();
u32 fpexc = fmrx(FPEXC);
/* if vfp is on, then save state for resumption */
if (fpexc & FPEXC_EN) {
pr_debug("%s: saving vfp state\n", __func__);
vfp_save_state(&ti->vfpstate, fpexc);
/* disable, just in case */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
} else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
fmxr(FPEXC, fpexc | FPEXC_EN);
vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
fmxr(FPEXC, fpexc);
#endif
}
/* clear any information we had about last context state */
vfp_current_hw_state[ti->cpu] = NULL;
return 0;
}
static void vfp_pm_resume(void)
{
/* ensure we have access to the vfp */
vfp_enable(NULL);
/* and disable it to ensure the next usage restores the state */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
switch (cmd) {
case CPU_PM_ENTER:
vfp_pm_suspend();
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
vfp_pm_resume();
break;
}
return NOTIFY_OK;
}
static struct notifier_block vfp_cpu_pm_notifier_block = {
.notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}
#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
/*
* Ensure that the VFP state stored in 'thread->vfpstate' is up to date
* with the hardware state.
*/
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
local_bh_disable();
if (vfp_state_in_hw(cpu, thread)) {
u32 fpexc = fmrx(FPEXC);
/*
* Save the last VFP state on this CPU.
*/
fmxr(FPEXC, fpexc | FPEXC_EN);
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
fmxr(FPEXC, fpexc);
}
local_bh_enable();
put_cpu();
}
/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
vfp_force_reload(cpu, thread);
put_cpu();
}
/*
* Save the current VFP state into the provided structures and prepare
* for entry into a new function (signal handler).
*/
int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
/* Ensure that the saved hwstate is up-to-date. */
vfp_sync_hwstate(thread);
/*
* Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
*/
memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
ufp->fpscr = hwstate->fpscr;
/*
* Copy the exception registers.
*/
ufp_exc->fpexc = hwstate->fpexc;
ufp_exc->fpinst = hwstate->fpinst;
ufp_exc->fpinst2 = hwstate->fpinst2;
/* Ensure that VFP is disabled. */
vfp_flush_hwstate(thread);
/*
* As per the PCS, clear the length and stride bits for function
* entry.
*/
hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
return 0;
}
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
unsigned long fpexc;
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
/*
* Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
*/
memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
hwstate->fpscr = ufp->fpscr;
/*
* Sanitise and restore the exception registers.
*/
fpexc = ufp_exc->fpexc;
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
/* Ensure FPINST2 is invalid and the exception flag is cleared. */
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
hwstate->fpexc = fpexc;
hwstate->fpinst = ufp_exc->fpinst;
hwstate->fpinst2 = ufp_exc->fpinst2;
return 0;
}
/*
* VFP hardware can lose all context when a CPU goes offline.
* As we will be running in SMP mode with CPU hotplug, we will save the
* hardware state at every thread switch. We clear our held state when
* a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state. When a CPU starts up, we re-enable access to the
* VFP hardware. The callbacks below are called on the CPU which
* is being offlined/onlined.
*/
static int vfp_dying_cpu(unsigned int cpu)
{
vfp_current_hw_state[cpu] = NULL;
return 0;
}
static int vfp_starting_cpu(unsigned int unused)
{
vfp_enable(NULL);
return 0;
}
static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
{
/*
* If we reach this point, a floating point exception has been raised
* while running in kernel mode. If the NEON/VFP unit was enabled at the
* time, it means a VFP instruction has been issued that requires
* software assistance to complete, something which is not currently
* supported in kernel mode.
* If the NEON/VFP unit was disabled, and the location pointed to below
* is properly preceded by a call to kernel_neon_begin(), something has
* caused the task to be scheduled out and back in again. In this case,
* rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
* be helpful in localizing the problem.
*/
if (fmrx(FPEXC) & FPEXC_EN)
pr_crit("BUG: unsupported FP instruction in kernel mode\n");
else
pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC));
return 1;
}
/*
* vfp_support_entry - Handle VFP exception
*
* @regs: pt_regs structure holding the register state at exception entry
* @trigger: The opcode of the instruction that triggered the exception
*
* Returns 0 if the exception was handled, or an error code otherwise.
*/
static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
{
struct thread_info *ti = current_thread_info();
u32 fpexc;
if (unlikely(!have_vfp))
return -ENODEV;
if (!user_mode(regs))
return vfp_kmode_exception(regs, trigger);
local_bh_disable();
fpexc = fmrx(FPEXC);
/*
* If the VFP unit was not enabled yet, we have to check whether the
* VFP state in the CPU's registers is the most recent VFP state
* associated with the process. On UP systems, we don't save the VFP
* state eagerly on a context switch, so we may need to save the
* VFP state to memory first, as it may belong to another process.
*/
if (!(fpexc & FPEXC_EN)) {
/*
* Enable the VFP unit but mask the FP exception flag for the
* time being, so we can access all the registers.
*/
fpexc |= FPEXC_EN;
fmxr(FPEXC, fpexc & ~FPEXC_EX);
/*
* Check whether or not the VFP state in the CPU's registers is
* the most recent VFP state associated with this task. On SMP,
* migration may result in multiple CPUs holding VFP states
* that belong to the same task, but only the most recent one
* is valid.
*/
if (!vfp_state_in_hw(ti->cpu, ti)) {
if (!IS_ENABLED(CONFIG_SMP) &&
vfp_current_hw_state[ti->cpu] != NULL) {
/*
* This CPU is currently holding the most
* recent VFP state associated with another
* task, and we must save that to memory first.
*/
vfp_save_state(vfp_current_hw_state[ti->cpu],
fpexc);
}
/*
* We can now proceed with loading the task's VFP state
* from memory into the CPU registers.
*/
fpexc = vfp_load_state(&ti->vfpstate);
vfp_current_hw_state[ti->cpu] = &ti->vfpstate;
#ifdef CONFIG_SMP
/*
* Record that this CPU is now the one holding the most
* recent VFP state of the task.
*/
ti->vfpstate.hard.cpu = ti->cpu;
#endif
}
if (fpexc & FPEXC_EX)
/*
			 * Handle the pending exception before retrying:
			 * branch out before setting an FPEXC configuration
			 * that would stop us reading the registers we need.
*/
goto bounce;
/*
* No FP exception is pending: just enable the VFP and
* replay the instruction that trapped.
*/
fmxr(FPEXC, fpexc);
} else {
/* Check for synchronous or asynchronous exceptions */
if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
u32 fpscr = fmrx(FPSCR);
/*
* On some implementations of the VFP subarch 1,
* setting FPSCR.IXE causes all the CDP instructions to
* be bounced synchronously without setting the
			 * FPEXC.EX bit.
*/
if (!(fpscr & FPSCR_IXE)) {
if (!(fpscr & FPSCR_LENGTH_MASK)) {
pr_debug("not VFP\n");
local_bh_enable();
return -ENOEXEC;
}
fpexc |= FPEXC_DEX;
}
}
bounce: regs->ARM_pc += 4;
VFP_bounce(trigger, fpexc, regs);
}
local_bh_enable();
return 0;
}
static struct undef_hook neon_support_hook[] = {{
.instr_mask = 0xfe000000,
.instr_val = 0xf2000000,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = vfp_support_entry,
}, {
.instr_mask = 0xff100000,
.instr_val = 0xf4000000,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = vfp_support_entry,
}, {
.instr_mask = 0xef000000,
.instr_val = 0xef000000,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = vfp_support_entry,
}, {
.instr_mask = 0xff100000,
.instr_val = 0xf9000000,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = vfp_support_entry,
}};
static struct undef_hook vfp_support_hook = {
.instr_mask = 0x0c000e00,
.instr_val = 0x0c000a00,
.fn = vfp_support_entry,
};
#ifdef CONFIG_KERNEL_MODE_NEON
/*
* Kernel-side NEON support functions
*/
void kernel_neon_begin(void)
{
struct thread_info *thread = current_thread_info();
unsigned int cpu;
u32 fpexc;
local_bh_disable();
/*
* Kernel mode NEON is only allowed outside of hardirq context with
* preemption and softirq processing disabled. This will make sure that
* the kernel mode NEON register contents never need to be preserved.
*/
BUG_ON(in_hardirq());
cpu = __smp_processor_id();
fpexc = fmrx(FPEXC) | FPEXC_EN;
fmxr(FPEXC, fpexc);
/*
* Save the userland NEON/VFP state. Under UP,
	 * the owner could be a task other than 'current'.
*/
if (vfp_state_in_hw(cpu, thread))
vfp_save_state(&thread->vfpstate, fpexc);
#ifndef CONFIG_SMP
else if (vfp_current_hw_state[cpu] != NULL)
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
vfp_current_hw_state[cpu] = NULL;
}
EXPORT_SYMBOL(kernel_neon_begin);
void kernel_neon_end(void)
{
/* Disable the NEON/VFP unit. */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
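/*
 * Usage sketch (illustrative, not part of the build): NEON may only
 * be used in the region bracketed by the calls above, from process
 * or softirq context, and the bracketed code must not sleep since
 * softirqs remain disabled throughout.
 */
#if 0
static void neon_user_example(void)
{
	kernel_neon_begin();
	/* ... NEON work via inline asm or GCC intrinsics ... */
	kernel_neon_end();
}
#endif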
#endif /* CONFIG_KERNEL_MODE_NEON */
static int __init vfp_detect(struct pt_regs *regs, unsigned int instr)
{
VFP_arch = UINT_MAX; /* mark as not present */
regs->ARM_pc += 4;
return 0;
}
static struct undef_hook vfp_detect_hook __initdata = {
.instr_mask = 0x0c000e00,
.instr_val = 0x0c000a00,
.cpsr_mask = MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = vfp_detect,
};
/*
* VFP support code initialisation.
*/
static int __init vfp_init(void)
{
unsigned int vfpsid;
unsigned int cpu_arch = cpu_architecture();
unsigned int isar6;
/*
* Enable the access to the VFP on all online CPUs so the
* following test on FPSID will succeed.
*/
if (cpu_arch >= CPU_ARCH_ARMv6)
on_each_cpu(vfp_enable, NULL, 1);
/*
* First check that there is a VFP that we can use.
 * The handler is already set up to just log calls, so
* we just need to read the VFPSID register.
*/
register_undef_hook(&vfp_detect_hook);
barrier();
vfpsid = fmrx(FPSID);
barrier();
unregister_undef_hook(&vfp_detect_hook);
pr_info("VFP support v0.3: ");
if (VFP_arch) {
pr_cont("not present\n");
return 0;
/* Extract the architecture on CPUID scheme */
} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK;
VFP_arch >>= FPSID_ARCH_BIT;
/*
* Check for the presence of the Advanced SIMD
* load/store instructions, integer and single
* precision floating point operations. Only check
* for NEON if the hardware has the MVFR registers.
*/
if (IS_ENABLED(CONFIG_NEON) &&
(fmrx(MVFR1) & 0x000fff00) == 0x00011100) {
elf_hwcap |= HWCAP_NEON;
for (int i = 0; i < ARRAY_SIZE(neon_support_hook); i++)
register_undef_hook(&neon_support_hook[i]);
}
if (IS_ENABLED(CONFIG_VFPv3)) {
u32 mvfr0 = fmrx(MVFR0);
if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 ||
((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) {
elf_hwcap |= HWCAP_VFPv3;
/*
* Check for VFPv3 D16 and VFPv4 D16. CPUs in
* this configuration only have 16 x 64bit
* registers.
*/
if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1)
/* also v4-D16 */
elf_hwcap |= HWCAP_VFPv3D16;
else
elf_hwcap |= HWCAP_VFPD32;
}
if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
elf_hwcap |= HWCAP_VFPv4;
if (((fmrx(MVFR1) & MVFR1_ASIMDHP_MASK) >> MVFR1_ASIMDHP_BIT) == 0x2)
elf_hwcap |= HWCAP_ASIMDHP;
if (((fmrx(MVFR1) & MVFR1_FPHP_MASK) >> MVFR1_FPHP_BIT) == 0x3)
elf_hwcap |= HWCAP_FPHP;
}
/*
* Check for the presence of Advanced SIMD Dot Product
* instructions.
*/
isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
if (cpuid_feature_extract_field(isar6, 4) == 0x1)
elf_hwcap |= HWCAP_ASIMDDP;
/*
* Check for the presence of Advanced SIMD Floating point
* half-precision multiplication instructions.
*/
if (cpuid_feature_extract_field(isar6, 8) == 0x1)
elf_hwcap |= HWCAP_ASIMDFHM;
/*
* Check for the presence of Advanced SIMD Bfloat16
* floating point instructions.
*/
if (cpuid_feature_extract_field(isar6, 20) == 0x1)
elf_hwcap |= HWCAP_ASIMDBF16;
/*
* Check for the presence of Advanced SIMD and floating point
	 * Int8 matrix multiplication instructions.
*/
if (cpuid_feature_extract_field(isar6, 24) == 0x1)
elf_hwcap |= HWCAP_I8MM;
/* Extract the architecture version on pre-cpuid scheme */
} else {
if (vfpsid & FPSID_NODOUBLE) {
pr_cont("no double precision support\n");
return 0;
}
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
"arm/vfp:starting", vfp_starting_cpu,
vfp_dying_cpu);
have_vfp = true;
register_undef_hook(&vfp_support_hook);
thread_register_notifier(&vfp_notifier_block);
vfp_pm_init();
/*
* We detected VFP, and the support code is
* in place; report VFP support to userspace.
*/
elf_hwcap |= HWCAP_VFP;
pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
VFP_arch,
(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
return 0;
}
core_initcall(vfp_init);
| linux-master | arch/arm/vfp/vfpmodule.c |
/*
* linux/arch/arm/vfp/vfpsingle.c
*
 * This code is derived in part from John R. Hauser's SoftFloat library, which
* carries the following notice:
*
* ===========================================================================
* This C source file is part of the SoftFloat IEC/IEEE Floating-point
* Arithmetic Package, Release 2.
*
* Written by John R. Hauser. This work was made possible in part by the
* International Computer Science Institute, located at Suite 600, 1947 Center
* Street, Berkeley, California 94704. Funding was partially provided by the
* National Science Foundation under grant MIP-9311980. The original version
* of this code was written as part of a project to build a fixed-point vector
* processor in collaboration with the University of California at Berkeley,
* overseen by Profs. Nelson Morgan and John Wawrzynek. More information
* is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
* arithmetic/softfloat.html'.
*
* THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
* has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
* TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
* PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
* AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
*
* Derivative works are acceptable, even for commercial purposes, so long as
* (1) they include prominent notice that the work is derivative, and (2) they
* include prominent notice akin to these three paragraphs for those parts of
* this code that are retained.
* ===========================================================================
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <asm/div64.h>
#include <asm/vfp.h>
#include "vfpinstr.h"
#include "vfp.h"
static struct vfp_single vfp_single_default_qnan = {
.exponent = 255,
.sign = 0,
.significand = VFP_SINGLE_SIGNIFICAND_QNAN,
};
static void vfp_single_dump(const char *str, struct vfp_single *s)
{
pr_debug("VFP: %s: sign=%d exponent=%d significand=%08x\n",
str, s->sign != 0, s->exponent, s->significand);
}
static void vfp_single_normalise_denormal(struct vfp_single *vs)
{
int bits = 31 - fls(vs->significand);
vfp_single_dump("normalise_denormal: in", vs);
if (bits) {
vs->exponent -= bits - 1;
vs->significand <<= bits;
}
vfp_single_dump("normalise_denormal: out", vs);
}
#ifndef DEBUG
#define vfp_single_normaliseround(sd,vsd,fpscr,except,func) __vfp_single_normaliseround(sd,vsd,fpscr,except)
u32 __vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions)
#else
u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions, const char *func)
#endif
{
u32 significand, incr, rmode;
int exponent, shift, underflow;
vfp_single_dump("pack: in", vs);
/*
* Infinities and NaNs are a special case.
*/
if (vs->exponent == 255 && (vs->significand == 0 || exceptions))
goto pack;
/*
* Special-case zero.
*/
if (vs->significand == 0) {
vs->exponent = 0;
goto pack;
}
exponent = vs->exponent;
significand = vs->significand;
/*
* Normalise first. Note that we shift the significand up to
* bit 31, so we have VFP_SINGLE_LOW_BITS + 1 below the least
* significant bit.
*/
shift = 32 - fls(significand);
if (shift < 32 && shift) {
exponent -= shift;
significand <<= shift;
}
#ifdef DEBUG
vs->exponent = exponent;
vs->significand = significand;
vfp_single_dump("pack: normalised", vs);
#endif
/*
* Tiny number?
*/
underflow = exponent < 0;
if (underflow) {
significand = vfp_shiftright32jamming(significand, -exponent);
exponent = 0;
#ifdef DEBUG
vs->exponent = exponent;
vs->significand = significand;
vfp_single_dump("pack: tiny number", vs);
#endif
if (!(significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1)))
underflow = 0;
}
/*
* Select rounding increment.
*/
incr = 0;
rmode = fpscr & FPSCR_RMODE_MASK;
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 1 << VFP_SINGLE_LOW_BITS;
if ((significand & (1 << (VFP_SINGLE_LOW_BITS + 1))) == 0)
incr -= 1;
} else if (rmode == FPSCR_ROUND_TOZERO) {
incr = 0;
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vs->sign != 0))
incr = (1 << (VFP_SINGLE_LOW_BITS + 1)) - 1;
pr_debug("VFP: rounding increment = 0x%08x\n", incr);
/*
* Is our rounding going to overflow?
*/
if ((significand + incr) < significand) {
exponent += 1;
significand = (significand >> 1) | (significand & 1);
incr >>= 1;
#ifdef DEBUG
vs->exponent = exponent;
vs->significand = significand;
vfp_single_dump("pack: overflow", vs);
#endif
}
/*
* If any of the low bits (which will be shifted out of the
* number) are non-zero, the result is inexact.
*/
if (significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1))
exceptions |= FPSCR_IXC;
/*
* Do our rounding.
*/
significand += incr;
/*
* Infinity?
*/
if (exponent >= 254) {
exceptions |= FPSCR_OFC | FPSCR_IXC;
if (incr == 0) {
vs->exponent = 253;
vs->significand = 0x7fffffff;
} else {
vs->exponent = 255; /* infinity */
vs->significand = 0;
}
} else {
if (significand >> (VFP_SINGLE_LOW_BITS + 1) == 0)
exponent = 0;
if (exponent || significand > 0x80000000)
underflow = 0;
if (underflow)
exceptions |= FPSCR_UFC;
vs->exponent = exponent;
vs->significand = significand >> 1;
}
pack:
vfp_single_dump("pack: final", vs);
{
s32 d = vfp_single_pack(vs);
#ifdef DEBUG
pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func,
sd, d, exceptions);
#endif
vfp_put_float(d, sd);
}
return exceptions;
}
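/*
 * Illustrative sketch (not part of the build): the rounding-increment
 * selection above written out on its own. VFP_SINGLE_LOW_BITS + 1
 * guard bits sit below the final LSB once the significand has been
 * normalised up to bit 31.
 */
#if 0
static u32 single_round_incr(u32 significand, u16 sign, u32 rmode)
{
	if (rmode == FPSCR_ROUND_NEAREST)
		return (1 << VFP_SINGLE_LOW_BITS) -
		       ((significand & (1 << (VFP_SINGLE_LOW_BITS + 1))) == 0);
	if (rmode == FPSCR_ROUND_TOZERO)
		return 0;
	if ((rmode == FPSCR_ROUND_PLUSINF) ^ (sign != 0))
		return (1 << (VFP_SINGLE_LOW_BITS + 1)) - 1;
	return 0;			/* rounding towards zero */
}
#endif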
/*
* Propagate the NaN, setting exceptions if it is signalling.
* 'n' is always a NaN. 'm' may be a number, NaN or infinity.
*/
static u32
vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
struct vfp_single *vsm, u32 fpscr)
{
struct vfp_single *nan;
int tn, tm = 0;
tn = vfp_single_type(vsn);
if (vsm)
tm = vfp_single_type(vsm);
if (fpscr & FPSCR_DEFAULT_NAN)
/*
* Default NaN mode - always returns a quiet NaN
*/
nan = &vfp_single_default_qnan;
else {
/*
* Contemporary mode - select the first signalling
* NAN, or if neither are signalling, the first
* quiet NAN.
*/
if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN))
nan = vsn;
else
nan = vsm;
/*
* Make the NaN quiet.
*/
nan->significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
}
*vsd = *nan;
/*
* If one was a signalling NAN, raise invalid operation.
*/
return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : VFP_NAN_FLAG;
}
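/*
 * Worked example (illustrative): with default-NaN mode clear, a
 * signalling NaN in 'n' and a quiet NaN in 'm' selects 'n', quietens
 * it by setting the top mantissa bit, and returns FPSCR_IOC so the
 * caller raises invalid-operation; two quiet NaNs select 'n' and
 * return only VFP_NAN_FLAG.
 */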
/*
* Extended operations
*/
static u32 vfp_single_fabs(int sd, int unused, s32 m, u32 fpscr)
{
vfp_put_float(vfp_single_packed_abs(m), sd);
return 0;
}
static u32 vfp_single_fcpy(int sd, int unused, s32 m, u32 fpscr)
{
vfp_put_float(m, sd);
return 0;
}
static u32 vfp_single_fneg(int sd, int unused, s32 m, u32 fpscr)
{
vfp_put_float(vfp_single_packed_negate(m), sd);
return 0;
}
static const u16 sqrt_oddadjust[] = {
0x0004, 0x0022, 0x005d, 0x00b1, 0x011d, 0x019f, 0x0236, 0x02e0,
0x039c, 0x0468, 0x0545, 0x0631, 0x072b, 0x0832, 0x0946, 0x0a67
};
static const u16 sqrt_evenadjust[] = {
0x0a2d, 0x08af, 0x075a, 0x0629, 0x051a, 0x0429, 0x0356, 0x029e,
0x0200, 0x0179, 0x0109, 0x00af, 0x0068, 0x0034, 0x0012, 0x0002
};
u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand)
{
int index;
u32 z, a;
if ((significand & 0xc0000000) != 0x40000000) {
pr_warn("VFP: estimate_sqrt: invalid significand\n");
}
a = significand << 1;
index = (a >> 27) & 15;
if (exponent & 1) {
z = 0x4000 + (a >> 17) - sqrt_oddadjust[index];
z = ((a / z) << 14) + (z << 15);
a >>= 1;
} else {
z = 0x8000 + (a >> 17) - sqrt_evenadjust[index];
z = a / z + z;
z = (z >= 0x20000) ? 0xffff8000 : (z << 15);
if (z <= a)
return (s32)a >> 1;
}
{
u64 v = (u64)a << 31;
do_div(v, z);
return v + (z >> 1);
}
}
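/*
 * Illustrative sketch (not part of the build): the refinement above
 * is a fixed-point Newton-Raphson step for the square root,
 * z' = (a/z + z) / 2. The same step in plain floating point, for
 * reference only:
 */
#if 0
static double sqrt_newton_step(double a, double z)
{
	return (a / z + z) / 2.0;	/* converges quadratically */
}
#endif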
static u32 vfp_single_fsqrt(int sd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vsm, vsd;
int ret, tm;
vfp_single_unpack(&vsm, m);
tm = vfp_single_type(&vsm);
if (tm & (VFP_NAN|VFP_INFINITY)) {
struct vfp_single *vsp = &vsd;
if (tm & VFP_NAN)
ret = vfp_propagate_nan(vsp, &vsm, NULL, fpscr);
else if (vsm.sign == 0) {
sqrt_copy:
vsp = &vsm;
ret = 0;
} else {
sqrt_invalid:
vsp = &vfp_single_default_qnan;
ret = FPSCR_IOC;
}
vfp_put_float(vfp_single_pack(vsp), sd);
return ret;
}
/*
* sqrt(+/- 0) == +/- 0
*/
if (tm & VFP_ZERO)
goto sqrt_copy;
/*
* Normalise a denormalised number
*/
if (tm & VFP_DENORMAL)
vfp_single_normalise_denormal(&vsm);
/*
* sqrt(<0) = invalid
*/
if (vsm.sign)
goto sqrt_invalid;
vfp_single_dump("sqrt", &vsm);
/*
* Estimate the square root.
*/
vsd.sign = 0;
vsd.exponent = ((vsm.exponent - 127) >> 1) + 127;
vsd.significand = vfp_estimate_sqrt_significand(vsm.exponent, vsm.significand) + 2;
vfp_single_dump("sqrt estimate", &vsd);
/*
* And now adjust.
*/
if ((vsd.significand & VFP_SINGLE_LOW_BITS_MASK) <= 5) {
if (vsd.significand < 2) {
vsd.significand = 0xffffffff;
} else {
u64 term;
s64 rem;
vsm.significand <<= !(vsm.exponent & 1);
term = (u64)vsd.significand * vsd.significand;
rem = ((u64)vsm.significand << 32) - term;
pr_debug("VFP: term=%016llx rem=%016llx\n", term, rem);
while (rem < 0) {
vsd.significand -= 1;
rem += ((u64)vsd.significand << 1) | 1;
}
vsd.significand |= rem != 0;
}
}
vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fsqrt");
}
/*
* Equal := ZC
* Less than := N
* Greater than := C
* Unordered := CV
*/
static u32 vfp_compare(int sd, int signal_on_qnan, s32 m, u32 fpscr)
{
s32 d;
u32 ret = 0;
d = vfp_get_float(sd);
if (vfp_single_packed_exponent(m) == 255 && vfp_single_packed_mantissa(m)) {
ret |= FPSCR_C | FPSCR_V;
if (signal_on_qnan || !(vfp_single_packed_mantissa(m) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1))))
/*
* Signalling NaN, or signalling on quiet NaN
*/
ret |= FPSCR_IOC;
}
if (vfp_single_packed_exponent(d) == 255 && vfp_single_packed_mantissa(d)) {
ret |= FPSCR_C | FPSCR_V;
if (signal_on_qnan || !(vfp_single_packed_mantissa(d) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1))))
/*
* Signalling NaN, or signalling on quiet NaN
*/
ret |= FPSCR_IOC;
}
if (ret == 0) {
if (d == m || vfp_single_packed_abs(d | m) == 0) {
/*
* equal
*/
ret |= FPSCR_Z | FPSCR_C;
} else if (vfp_single_packed_sign(d ^ m)) {
/*
* different signs
*/
if (vfp_single_packed_sign(d))
/*
* d is negative, so d < m
*/
ret |= FPSCR_N;
else
/*
* d is positive, so d > m
*/
ret |= FPSCR_C;
} else if ((vfp_single_packed_sign(d) != 0) ^ (d < m)) {
/*
* d < m
*/
ret |= FPSCR_N;
} else if ((vfp_single_packed_sign(d) != 0) ^ (d > m)) {
/*
* d > m
*/
ret |= FPSCR_C;
}
}
return ret;
}
static u32 vfp_single_fcmp(int sd, int unused, s32 m, u32 fpscr)
{
return vfp_compare(sd, 0, m, fpscr);
}
static u32 vfp_single_fcmpe(int sd, int unused, s32 m, u32 fpscr)
{
return vfp_compare(sd, 1, m, fpscr);
}
static u32 vfp_single_fcmpz(int sd, int unused, s32 m, u32 fpscr)
{
return vfp_compare(sd, 0, 0, fpscr);
}
static u32 vfp_single_fcmpez(int sd, int unused, s32 m, u32 fpscr)
{
return vfp_compare(sd, 1, 0, fpscr);
}
static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vsm;
struct vfp_double vdd;
int tm;
u32 exceptions = 0;
vfp_single_unpack(&vsm, m);
tm = vfp_single_type(&vsm);
/*
* If we have a signalling NaN, signal invalid operation.
*/
if (tm == VFP_SNAN)
exceptions = FPSCR_IOC;
if (tm & VFP_DENORMAL)
vfp_single_normalise_denormal(&vsm);
vdd.sign = vsm.sign;
vdd.significand = (u64)vsm.significand << 32;
/*
* If we have an infinity or NaN, the exponent must be 2047.
*/
if (tm & (VFP_INFINITY|VFP_NAN)) {
vdd.exponent = 2047;
if (tm == VFP_QNAN)
vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
goto pack_nan;
} else if (tm & VFP_ZERO)
vdd.exponent = 0;
else
vdd.exponent = vsm.exponent + (1023 - 127);
return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd");
pack_nan:
vfp_put_double(vfp_double_pack(&vdd), dd);
return exceptions;
}
static u32 vfp_single_fuito(int sd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vs;
vs.sign = 0;
vs.exponent = 127 + 31 - 1;
vs.significand = (u32)m;
return vfp_single_normaliseround(sd, &vs, fpscr, 0, "fuito");
}
static u32 vfp_single_fsito(int sd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vs;
vs.sign = (m & 0x80000000) >> 16;
vs.exponent = 127 + 31 - 1;
vs.significand = vs.sign ? -m : m;
return vfp_single_normaliseround(sd, &vs, fpscr, 0, "fsito");
}
static u32 vfp_single_ftoui(int sd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vsm;
u32 d, exceptions = 0;
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
vfp_single_unpack(&vsm, m);
vfp_single_dump("VSM", &vsm);
/*
* Do we have a denormalised number?
*/
tm = vfp_single_type(&vsm);
if (tm & VFP_DENORMAL)
exceptions |= FPSCR_IDC;
if (tm & VFP_NAN)
vsm.sign = 0;
if (vsm.exponent >= 127 + 32) {
d = vsm.sign ? 0 : 0xffffffff;
exceptions = FPSCR_IOC;
} else if (vsm.exponent >= 127 - 1) {
int shift = 127 + 31 - vsm.exponent;
u32 rem, incr = 0;
/*
* 2^0 <= m < 2^32-2^8
*/
d = (vsm.significand << 1) >> shift;
rem = vsm.significand << (33 - shift);
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 0x80000000;
if ((d & 1) == 0)
incr -= 1;
} else if (rmode == FPSCR_ROUND_TOZERO) {
incr = 0;
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) {
incr = ~0;
}
if ((rem + incr) < rem) {
if (d < 0xffffffff)
d += 1;
else
exceptions |= FPSCR_IOC;
}
if (d && vsm.sign) {
d = 0;
exceptions |= FPSCR_IOC;
} else if (rem)
exceptions |= FPSCR_IXC;
} else {
d = 0;
if (vsm.exponent | vsm.significand) {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign) {
d = 0;
exceptions |= FPSCR_IOC;
}
}
}
pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float(d, sd);
return exceptions;
}
static u32 vfp_single_ftouiz(int sd, int unused, s32 m, u32 fpscr)
{
return vfp_single_ftoui(sd, unused, m, FPSCR_ROUND_TOZERO);
}
static u32 vfp_single_ftosi(int sd, int unused, s32 m, u32 fpscr)
{
struct vfp_single vsm;
u32 d, exceptions = 0;
int rmode = fpscr & FPSCR_RMODE_MASK;
int tm;
vfp_single_unpack(&vsm, m);
vfp_single_dump("VSM", &vsm);
/*
* Do we have a denormalised number?
*/
tm = vfp_single_type(&vsm);
	if (tm & VFP_DENORMAL)
exceptions |= FPSCR_IDC;
if (tm & VFP_NAN) {
d = 0;
exceptions |= FPSCR_IOC;
} else if (vsm.exponent >= 127 + 32) {
/*
* m >= 2^31-2^7: invalid
*/
d = 0x7fffffff;
if (vsm.sign)
d = ~d;
exceptions |= FPSCR_IOC;
} else if (vsm.exponent >= 127 - 1) {
int shift = 127 + 31 - vsm.exponent;
u32 rem, incr = 0;
/* 2^0 <= m <= 2^31-2^7 */
d = (vsm.significand << 1) >> shift;
rem = vsm.significand << (33 - shift);
if (rmode == FPSCR_ROUND_NEAREST) {
incr = 0x80000000;
if ((d & 1) == 0)
incr -= 1;
} else if (rmode == FPSCR_ROUND_TOZERO) {
incr = 0;
} else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) {
incr = ~0;
}
if ((rem + incr) < rem && d < 0xffffffff)
d += 1;
if (d > 0x7fffffff + (vsm.sign != 0)) {
d = 0x7fffffff + (vsm.sign != 0);
exceptions |= FPSCR_IOC;
} else if (rem)
exceptions |= FPSCR_IXC;
if (vsm.sign)
d = -d;
} else {
d = 0;
if (vsm.exponent | vsm.significand) {
exceptions |= FPSCR_IXC;
if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
d = 1;
else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign)
d = -1;
}
}
pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
vfp_put_float((s32)d, sd);
return exceptions;
}
static u32 vfp_single_ftosiz(int sd, int unused, s32 m, u32 fpscr)
{
return vfp_single_ftosi(sd, unused, m, FPSCR_ROUND_TOZERO);
}
static struct op fops_ext[32] = {
[FEXT_TO_IDX(FEXT_FCPY)] = { vfp_single_fcpy, 0 },
[FEXT_TO_IDX(FEXT_FABS)] = { vfp_single_fabs, 0 },
[FEXT_TO_IDX(FEXT_FNEG)] = { vfp_single_fneg, 0 },
[FEXT_TO_IDX(FEXT_FSQRT)] = { vfp_single_fsqrt, 0 },
[FEXT_TO_IDX(FEXT_FCMP)] = { vfp_single_fcmp, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCMPE)] = { vfp_single_fcmpe, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCMPZ)] = { vfp_single_fcmpz, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCMPEZ)] = { vfp_single_fcmpez, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FCVT)] = { vfp_single_fcvtd, OP_SCALAR|OP_DD },
[FEXT_TO_IDX(FEXT_FUITO)] = { vfp_single_fuito, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FSITO)] = { vfp_single_fsito, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FTOUI)] = { vfp_single_ftoui, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FTOUIZ)] = { vfp_single_ftouiz, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FTOSI)] = { vfp_single_ftosi, OP_SCALAR },
[FEXT_TO_IDX(FEXT_FTOSIZ)] = { vfp_single_ftosiz, OP_SCALAR },
};
static u32
vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
struct vfp_single *vsm, u32 fpscr)
{
struct vfp_single *vsp;
u32 exceptions = 0;
int tn, tm;
tn = vfp_single_type(vsn);
tm = vfp_single_type(vsm);
if (tn & tm & VFP_INFINITY) {
/*
* Two infinities. Are they different signs?
*/
if (vsn->sign ^ vsm->sign) {
/*
* different signs -> invalid
*/
exceptions = FPSCR_IOC;
vsp = &vfp_single_default_qnan;
} else {
/*
* same signs -> valid
*/
vsp = vsn;
}
} else if (tn & VFP_INFINITY && tm & VFP_NUMBER) {
/*
* One infinity and one number -> infinity
*/
vsp = vsn;
} else {
/*
* 'n' is a NaN of some type
*/
return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
}
*vsd = *vsp;
return exceptions;
}
static u32
vfp_single_add(struct vfp_single *vsd, struct vfp_single *vsn,
struct vfp_single *vsm, u32 fpscr)
{
u32 exp_diff, m_sig;
if (vsn->significand & 0x80000000 ||
vsm->significand & 0x80000000) {
pr_info("VFP: bad FP values in %s\n", __func__);
vfp_single_dump("VSN", vsn);
vfp_single_dump("VSM", vsm);
}
/*
* Ensure that 'n' is the largest magnitude number. Note that
* if 'n' and 'm' have equal exponents, we do not swap them.
* This ensures that NaN propagation works correctly.
*/
if (vsn->exponent < vsm->exponent) {
struct vfp_single *t = vsn;
vsn = vsm;
vsm = t;
}
/*
* Is 'n' an infinity or a NaN? Note that 'm' may be a number,
* infinity or a NaN here.
*/
if (vsn->exponent == 255)
return vfp_single_fadd_nonnumber(vsd, vsn, vsm, fpscr);
/*
* We have two proper numbers, where 'vsn' is the larger magnitude.
*
* Copy 'n' to 'd' before doing the arithmetic.
*/
*vsd = *vsn;
/*
* Align both numbers.
*/
exp_diff = vsn->exponent - vsm->exponent;
m_sig = vfp_shiftright32jamming(vsm->significand, exp_diff);
/*
* If the signs are different, we are really subtracting.
*/
if (vsn->sign ^ vsm->sign) {
m_sig = vsn->significand - m_sig;
if ((s32)m_sig < 0) {
vsd->sign = vfp_sign_negate(vsd->sign);
m_sig = -m_sig;
} else if (m_sig == 0) {
vsd->sign = (fpscr & FPSCR_RMODE_MASK) ==
FPSCR_ROUND_MINUSINF ? 0x8000 : 0;
}
} else {
m_sig = vsn->significand + m_sig;
}
vsd->significand = m_sig;
return 0;
}
static u32
vfp_single_multiply(struct vfp_single *vsd, struct vfp_single *vsn, struct vfp_single *vsm, u32 fpscr)
{
vfp_single_dump("VSN", vsn);
vfp_single_dump("VSM", vsm);
/*
* Ensure that 'n' is the largest magnitude number. Note that
* if 'n' and 'm' have equal exponents, we do not swap them.
* This ensures that NaN propagation works correctly.
*/
if (vsn->exponent < vsm->exponent) {
struct vfp_single *t = vsn;
vsn = vsm;
vsm = t;
pr_debug("VFP: swapping M <-> N\n");
}
vsd->sign = vsn->sign ^ vsm->sign;
/*
* If 'n' is an infinity or NaN, handle it. 'm' may be anything.
*/
if (vsn->exponent == 255) {
if (vsn->significand || (vsm->exponent == 255 && vsm->significand))
return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
if ((vsm->exponent | vsm->significand) == 0) {
*vsd = vfp_single_default_qnan;
return FPSCR_IOC;
}
vsd->exponent = vsn->exponent;
vsd->significand = 0;
return 0;
}
/*
* If 'm' is zero, the result is always zero. In this case,
* 'n' may be zero or a number, but it doesn't matter which.
*/
if ((vsm->exponent | vsm->significand) == 0) {
vsd->exponent = 0;
vsd->significand = 0;
return 0;
}
	/*
	 * Add 2 to the destination exponent: each significand carries its
	 * binary point 30 bits up, so taking the high 32 bits of the
	 * 64-bit product leaves the result scaled down by one extra bit
	 * per operand.
	 */
vsd->exponent = vsn->exponent + vsm->exponent - 127 + 2;
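	/*
	 * Narrow the 64-bit product to 32 bits, ORing ("jamming") any
	 * discarded low bits into the LSB so the later rounding step can
	 * still tell that the result is inexact.
	 */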
vsd->significand = vfp_hi64to32jamming((u64)vsn->significand * vsm->significand);
vfp_single_dump("VSD", vsd);
return 0;
}
#define NEG_MULTIPLY (1 << 0)
#define NEG_SUBTRACT (1 << 1)
static u32
vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, char *func)
{
struct vfp_single vsd, vsp, vsn, vsm;
u32 exceptions;
s32 v;
v = vfp_get_float(sn);
pr_debug("VFP: s%u = %08x\n", sn, v);
vfp_single_unpack(&vsn, v);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
if (negate & NEG_MULTIPLY)
vsp.sign = vfp_sign_negate(vsp.sign);
v = vfp_get_float(sd);
pr_debug("VFP: s%u = %08x\n", sd, v);
vfp_single_unpack(&vsn, v);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
if (negate & NEG_SUBTRACT)
vsn.sign = vfp_sign_negate(vsn.sign);
exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, func);
}
/*
* Standard operations
*/
/*
* sd = sd + (sn * sm)
*/
static u32 vfp_single_fmac(int sd, int sn, s32 m, u32 fpscr)
{
return vfp_single_multiply_accumulate(sd, sn, m, fpscr, 0, "fmac");
}
/*
* sd = sd - (sn * sm)
*/
static u32 vfp_single_fnmac(int sd, int sn, s32 m, u32 fpscr)
{
return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_MULTIPLY, "fnmac");
}
/*
* sd = -sd + (sn * sm)
*/
static u32 vfp_single_fmsc(int sd, int sn, s32 m, u32 fpscr)
{
return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_SUBTRACT, "fmsc");
}
/*
* sd = -sd - (sn * sm)
*/
static u32 vfp_single_fnmsc(int sd, int sn, s32 m, u32 fpscr)
{
return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
}
/*
* sd = sn * sm
*/
static u32 vfp_single_fmul(int sd, int sn, s32 m, u32 fpscr)
{
struct vfp_single vsd, vsn, vsm;
u32 exceptions;
s32 n = vfp_get_float(sn);
pr_debug("VFP: s%u = %08x\n", sn, n);
vfp_single_unpack(&vsn, n);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fmul");
}
/*
* sd = -(sn * sm)
*/
static u32 vfp_single_fnmul(int sd, int sn, s32 m, u32 fpscr)
{
struct vfp_single vsd, vsn, vsm;
u32 exceptions;
s32 n = vfp_get_float(sn);
pr_debug("VFP: s%u = %08x\n", sn, n);
vfp_single_unpack(&vsn, n);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
vsd.sign = vfp_sign_negate(vsd.sign);
return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fnmul");
}
/*
* sd = sn + sm
*/
static u32 vfp_single_fadd(int sd, int sn, s32 m, u32 fpscr)
{
struct vfp_single vsd, vsn, vsm;
u32 exceptions;
s32 n = vfp_get_float(sn);
pr_debug("VFP: s%u = %08x\n", sn, n);
/*
* Unpack and normalise denormals.
*/
vfp_single_unpack(&vsn, n);
if (vsn.exponent == 0 && vsn.significand)
vfp_single_normalise_denormal(&vsn);
vfp_single_unpack(&vsm, m);
if (vsm.exponent == 0 && vsm.significand)
vfp_single_normalise_denormal(&vsm);
exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fadd");
}
/*
* sd = sn - sm
*/
static u32 vfp_single_fsub(int sd, int sn, s32 m, u32 fpscr)
{
/*
* Subtraction is addition with one sign inverted.
*/
return vfp_single_fadd(sd, sn, vfp_single_packed_negate(m), fpscr);
}
/*
* sd = sn / sm
*/
static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr)
{
struct vfp_single vsd, vsn, vsm;
u32 exceptions = 0;
s32 n = vfp_get_float(sn);
int tm, tn;
pr_debug("VFP: s%u = %08x\n", sn, n);
vfp_single_unpack(&vsn, n);
vfp_single_unpack(&vsm, m);
vsd.sign = vsn.sign ^ vsm.sign;
tn = vfp_single_type(&vsn);
tm = vfp_single_type(&vsm);
/*
* Is n a NAN?
*/
if (tn & VFP_NAN)
goto vsn_nan;
/*
* Is m a NAN?
*/
if (tm & VFP_NAN)
goto vsm_nan;
/*
* If n and m are infinity, the result is invalid
* If n and m are zero, the result is invalid
*/
if (tm & tn & (VFP_INFINITY|VFP_ZERO))
goto invalid;
/*
* If n is infinity, the result is infinity
*/
if (tn & VFP_INFINITY)
goto infinity;
/*
* If m is zero, raise div0 exception
*/
if (tm & VFP_ZERO)
goto divzero;
/*
* If m is infinity, or n is zero, the result is zero
*/
if (tm & VFP_INFINITY || tn & VFP_ZERO)
goto zero;
if (tn & VFP_DENORMAL)
vfp_single_normalise_denormal(&vsn);
if (tm & VFP_DENORMAL)
vfp_single_normalise_denormal(&vsm);
/*
* Ok, we have two numbers, we can perform division.
*/
vsd.exponent = vsn.exponent - vsm.exponent + 127 - 1;
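	/*
	 * Pre-scale the operands so that the fixed-point quotient computed
	 * below lands in a range the normalisation code can handle: if the
	 * dividend is at least as large as the divisor, halve the dividend
	 * and bump the exponent to compensate.
	 */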
vsm.significand <<= 1;
if (vsm.significand <= (2 * vsn.significand)) {
vsn.significand >>= 1;
vsd.exponent++;
}
{
u64 significand = (u64)vsn.significand << 32;
do_div(significand, vsm.significand);
vsd.significand = significand;
}
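	/*
	 * The quotient's low bits being clear may just mean the sticky
	 * bits were lost: multiply back, and if the product does not
	 * reproduce the dividend there was a remainder, so set the LSB to
	 * keep the result from rounding as if it were exact.
	 */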
if ((vsd.significand & 0x3f) == 0)
vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fdiv");
vsn_nan:
exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
pack:
vfp_put_float(vfp_single_pack(&vsd), sd);
return exceptions;
vsm_nan:
exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
goto pack;
zero:
vsd.exponent = 0;
vsd.significand = 0;
goto pack;
divzero:
exceptions = FPSCR_DZC;
infinity:
vsd.exponent = 255;
vsd.significand = 0;
goto pack;
invalid:
vfp_put_float(vfp_single_pack(&vfp_single_default_qnan), sd);
return FPSCR_IOC;
}
static struct op fops[16] = {
[FOP_TO_IDX(FOP_FMAC)] = { vfp_single_fmac, 0 },
[FOP_TO_IDX(FOP_FNMAC)] = { vfp_single_fnmac, 0 },
[FOP_TO_IDX(FOP_FMSC)] = { vfp_single_fmsc, 0 },
[FOP_TO_IDX(FOP_FNMSC)] = { vfp_single_fnmsc, 0 },
[FOP_TO_IDX(FOP_FMUL)] = { vfp_single_fmul, 0 },
[FOP_TO_IDX(FOP_FNMUL)] = { vfp_single_fnmul, 0 },
[FOP_TO_IDX(FOP_FADD)] = { vfp_single_fadd, 0 },
[FOP_TO_IDX(FOP_FSUB)] = { vfp_single_fsub, 0 },
[FOP_TO_IDX(FOP_FDIV)] = { vfp_single_fdiv, 0 },
};
#define FREG_BANK(x) ((x) & 0x18)
#define FREG_IDX(x) ((x) & 7)
u32 vfp_single_cpdo(u32 inst, u32 fpscr)
{
u32 op = inst & FOP_MASK;
u32 exceptions = 0;
unsigned int dest;
unsigned int sn = vfp_get_sn(inst);
unsigned int sm = vfp_get_sm(inst);
unsigned int vecitr, veclen, vecstride;
struct op *fop;
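	/*
	 * The FPSCR stride field architecturally encodes only two values:
	 * 0b00 (stride 1) and 0b11 (stride 2), hence the comparison
	 * against the full mask rather than a shift.
	 */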
vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);
fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
/*
* fcvtsd takes a dN register number as destination, not sN.
* Technically, if bit 0 of dd is set, this is an invalid
* instruction. However, we ignore this for efficiency.
* It also only operates on scalars.
*/
if (fop->flags & OP_DD)
dest = vfp_get_dd(inst);
else
dest = vfp_get_sd(inst);
/*
* If destination bank is zero, vector length is always '1'.
* ARM DDI0100F C5.1.3, C5.3.2.
*/
if ((fop->flags & OP_SCALAR) || FREG_BANK(dest) == 0)
veclen = 0;
else
veclen = fpscr & FPSCR_LENGTH_MASK;
pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
(veclen >> FPSCR_LENGTH_BIT) + 1);
if (!fop->fn)
goto invalid;
for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
s32 m = vfp_get_float(sm);
u32 except;
char type;
type = fop->flags & OP_DD ? 'd' : 's';
if (op == FOP_EXT)
pr_debug("VFP: itr%d (%c%u) = op[%u] (s%u=%08x)\n",
vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
sm, m);
else
pr_debug("VFP: itr%d (%c%u) = (s%u) op[%u] (s%u=%08x)\n",
vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
FOP_TO_IDX(op), sm, m);
except = fop->fn(dest, sn, m, fpscr);
pr_debug("VFP: itr%d: exceptions=%08x\n",
vecitr >> FPSCR_LENGTH_BIT, except);
exceptions |= except;
/*
* CHECK: It appears to be undefined whether we stop when
* we encounter an exception. We continue.
*/
dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7);
sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7);
if (FREG_BANK(sm) != 0)
sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7);
}
return exceptions;
invalid:
return (u32)-1;
}
| linux-master | arch/arm/vfp/vfpsingle.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/arm/mach-vt8500/vt8500.c
*
* Copyright (C) 2012 Tony Prisk <[email protected]>
*/
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/reboot.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define LEGACY_GPIO_BASE 0xD8110000
#define LEGACY_PMC_BASE 0xD8130000
/* Registers in GPIO Controller */
#define VT8500_GPIO_MUX_REG 0x200
/* Registers in Power Management Controller */
#define VT8500_HCR_REG 0x12
#define VT8500_PMSR_REG 0x60
static void __iomem *pmc_base;
static void vt8500_restart(enum reboot_mode mode, const char *cmd)
{
if (pmc_base)
writel(1, pmc_base + VT8500_PMSR_REG);
}
static struct map_desc vt8500_io_desc[] __initdata = {
/* SoC MMIO registers */
[0] = {
.virtual = 0xf8000000,
.pfn = __phys_to_pfn(0xd8000000),
.length = 0x00390000, /* max of all chip variants */
.type = MT_DEVICE
},
};
static void __init vt8500_map_io(void)
{
iotable_init(vt8500_io_desc, ARRAY_SIZE(vt8500_io_desc));
}
static void vt8500_power_off(void)
{
local_irq_disable();
writew(5, pmc_base + VT8500_HCR_REG);
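	/* idle the core (ARMv5 wait-for-interrupt) until the power drops */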
asm("mcr p15, 0, %0, c7, c0, 4" : : "r" (0));
}
static void __init vt8500_init(void)
{
struct device_node *np;
#if defined(CONFIG_FB_VT8500) || defined(CONFIG_FB_WM8505)
struct device_node *fb;
void __iomem *gpio_base;
#endif
#ifdef CONFIG_FB_VT8500
fb = of_find_compatible_node(NULL, NULL, "via,vt8500-fb");
if (fb) {
np = of_find_compatible_node(NULL, NULL, "via,vt8500-gpio");
if (np) {
gpio_base = of_iomap(np, 0);
if (!gpio_base)
pr_err("%s: of_iomap(gpio_mux) failed\n",
__func__);
of_node_put(np);
} else {
gpio_base = ioremap(LEGACY_GPIO_BASE, 0x1000);
if (!gpio_base)
pr_err("%s: ioremap(legacy_gpio_mux) failed\n",
__func__);
}
if (gpio_base) {
writel(readl(gpio_base + VT8500_GPIO_MUX_REG) | 1,
gpio_base + VT8500_GPIO_MUX_REG);
iounmap(gpio_base);
} else
pr_err("%s: Could not remap GPIO mux\n", __func__);
of_node_put(fb);
}
#endif
#ifdef CONFIG_FB_WM8505
fb = of_find_compatible_node(NULL, NULL, "wm,wm8505-fb");
if (fb) {
np = of_find_compatible_node(NULL, NULL, "wm,wm8505-gpio");
if (!np)
np = of_find_compatible_node(NULL, NULL,
"wm,wm8650-gpio");
if (np) {
gpio_base = of_iomap(np, 0);
if (!gpio_base)
pr_err("%s: of_iomap(gpio_mux) failed\n",
__func__);
of_node_put(np);
} else {
gpio_base = ioremap(LEGACY_GPIO_BASE, 0x1000);
if (!gpio_base)
pr_err("%s: ioremap(legacy_gpio_mux) failed\n",
__func__);
}
if (gpio_base) {
writel(readl(gpio_base + VT8500_GPIO_MUX_REG) |
0x80000000, gpio_base + VT8500_GPIO_MUX_REG);
iounmap(gpio_base);
} else
pr_err("%s: Could not remap GPIO mux\n", __func__);
of_node_put(fb);
}
#endif
np = of_find_compatible_node(NULL, NULL, "via,vt8500-pmc");
if (np) {
pmc_base = of_iomap(np, 0);
if (!pmc_base)
pr_err("%s:of_iomap(pmc) failed\n", __func__);
of_node_put(np);
} else {
pmc_base = ioremap(LEGACY_PMC_BASE, 0x1000);
if (!pmc_base)
pr_err("%s:ioremap(power_off) failed\n", __func__);
}
if (pmc_base)
pm_power_off = &vt8500_power_off;
else
pr_err("%s: PMC Hibernation register could not be remapped, not enabling power off!\n", __func__);
}
static const char * const vt8500_dt_compat[] = {
"via,vt8500",
"wm,wm8650",
"wm,wm8505",
"wm,wm8750",
"wm,wm8850",
NULL
};
DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
.dt_compat = vt8500_dt_compat,
.map_io = vt8500_map_io,
.init_machine = vt8500_init,
.restart = vt8500_restart,
MACHINE_END
| linux-master | arch/arm/mach-vt8500/vt8500.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
#include <asm/mach/arch.h>
static const char * const gxp_board_dt_compat[] = {
"hpe,gxp",
NULL,
};
DT_MACHINE_START(GXP_DT, "HPE GXP")
.dt_compat = gxp_board_dt_compat,
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
MACHINE_END
| linux-master | arch/arm/mach-hpe/gxp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-sa1100/ssp.c
*
* Copyright (C) 2003 Russell King.
*
* Generic SSP driver. This provides the generic core for simple
* IO-based SSP applications.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <asm/hardware/ssp.h>
#define TIMEOUT 100000
static irqreturn_t ssp_interrupt(int irq, void *dev_id)
{
unsigned int status = Ser4SSSR;
if (status & SSSR_ROR)
printk(KERN_WARNING "SSP: receiver overrun\n");
Ser4SSSR = SSSR_ROR;
return status ? IRQ_HANDLED : IRQ_NONE;
}
/**
* ssp_write_word - write a word to the SSP port
* @data: 16-bit, MSB justified data to write.
*
* Wait for a free entry in the SSP transmit FIFO, and write a data
* word to the SSP port. Wait for the SSP port to start sending
* the data.
*
* The caller is expected to perform the necessary locking.
*
* Returns:
* %-ETIMEDOUT timeout occurred
* 0 success
*/
int ssp_write_word(u16 data)
{
int timeout = TIMEOUT;
while (!(Ser4SSSR & SSSR_TNF)) {
if (!--timeout)
return -ETIMEDOUT;
cpu_relax();
}
Ser4SSDR = data;
timeout = TIMEOUT;
while (!(Ser4SSSR & SSSR_BSY)) {
if (!--timeout)
return -ETIMEDOUT;
cpu_relax();
}
return 0;
}
/**
 * ssp_read_word - read a word from the SSP port
 * @data: pointer to a 16-bit variable that receives the data word
 *
 * Wait for a data word in the SSP receive FIFO, and store the
 * received data in @data.  Data is LSB justified.
 *
 * Note: Currently, if no data is expected to arrive, this
 * function will wait forever.
 *
 * The caller is expected to perform the necessary locking.
 *
 * Returns:
 *	%-ETIMEDOUT	timeout occurred
 *	0		success, the received word is stored in @data
*/
int ssp_read_word(u16 *data)
{
int timeout = TIMEOUT;
while (!(Ser4SSSR & SSSR_RNE)) {
if (!--timeout)
return -ETIMEDOUT;
cpu_relax();
}
*data = (u16)Ser4SSDR;
return 0;
}
/**
* ssp_flush - flush the transmit and receive FIFOs
*
* Wait for the SSP to idle, and ensure that the receive FIFO
* is empty.
*
* The caller is expected to perform the necessary locking.
*
* Returns:
* %-ETIMEDOUT timeout occurred
* 0 success
*/
int ssp_flush(void)
{
int timeout = TIMEOUT * 2;
do {
while (Ser4SSSR & SSSR_RNE) {
if (!--timeout)
return -ETIMEDOUT;
(void) Ser4SSDR;
}
if (!--timeout)
return -ETIMEDOUT;
} while (Ser4SSSR & SSSR_BSY);
return 0;
}
/**
* ssp_enable - enable the SSP port
*
* Turn on the SSP port.
*/
void ssp_enable(void)
{
Ser4SSCR0 |= SSCR0_SSE;
}
/**
* ssp_disable - shut down the SSP port
*
* Turn off the SSP port, optionally powering it down.
*/
void ssp_disable(void)
{
Ser4SSCR0 &= ~SSCR0_SSE;
}
/**
* ssp_save_state - save the SSP configuration
* @ssp: pointer to structure to save SSP configuration
*
* Save the configured SSP state for suspend.
*/
void ssp_save_state(struct ssp_state *ssp)
{
ssp->cr0 = Ser4SSCR0;
ssp->cr1 = Ser4SSCR1;
Ser4SSCR0 &= ~SSCR0_SSE;
}
/**
* ssp_restore_state - restore a previously saved SSP configuration
* @ssp: pointer to configuration saved by ssp_save_state
*
* Restore the SSP configuration saved previously by ssp_save_state.
*/
void ssp_restore_state(struct ssp_state *ssp)
{
Ser4SSSR = SSSR_ROR;
Ser4SSCR0 = ssp->cr0 & ~SSCR0_SSE;
Ser4SSCR1 = ssp->cr1;
Ser4SSCR0 = ssp->cr0;
}
/**
* ssp_init - setup the SSP port
*
 * Initialise and claim resources for the SSP port.
*
* Returns:
* %-ENODEV if the SSP port is unavailable
* %-EBUSY if the resources are already in use
* %0 on success
*/
int ssp_init(void)
{
int ret;
if (!(PPAR & PPAR_SPR) && (Ser4MCCR0 & MCCR0_MCE))
return -ENODEV;
if (!request_mem_region(__PREG(Ser4SSCR0), 0x18, "SSP")) {
return -EBUSY;
}
Ser4SSSR = SSSR_ROR;
ret = request_irq(IRQ_Ser4SSP, ssp_interrupt, 0, "SSP", NULL);
if (ret)
goto out_region;
return 0;
out_region:
release_mem_region(__PREG(Ser4SSCR0), 0x18);
return ret;
}
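/*
 * Example usage (an illustrative sketch only, not part of the original
 * driver; the 0x1234 payload is made up):
 *
 *	u16 rx;
 *
 *	if (ssp_init() == 0) {
 *		ssp_enable();
 *		if (ssp_write_word(0x1234) == 0 &&
 *		    ssp_read_word(&rx) == 0)
 *			pr_info("SSP: received %04x\n", rx);
 *		ssp_flush();
 *		ssp_disable();
 *		ssp_exit();
 *	}
 */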
/**
* ssp_exit - undo the effects of ssp_init
*
 * Release and free resources for the SSP port.
*/
void ssp_exit(void)
{
Ser4SSCR0 &= ~SSCR0_SSE;
free_irq(IRQ_Ser4SSP, NULL);
release_mem_region(__PREG(Ser4SSCR0), 0x18);
}
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA11x0 SSP PIO driver");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ssp_write_word);
EXPORT_SYMBOL(ssp_read_word);
EXPORT_SYMBOL(ssp_flush);
EXPORT_SYMBOL(ssp_enable);
EXPORT_SYMBOL(ssp_disable);
EXPORT_SYMBOL(ssp_save_state);
EXPORT_SYMBOL(ssp_restore_state);
EXPORT_SYMBOL(ssp_init);
EXPORT_SYMBOL(ssp_exit);
| linux-master | arch/arm/mach-sa1100/ssp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-sa1100/jornada720.c
*
* HP Jornada720 init code
*
* Copyright (C) 2007 Kristoffer Ericson <[email protected]>
* Copyright (C) 2006 Filip Zyzniewski <[email protected]>
* Copyright (C) 2005 Michael Gernoth <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/gpio/machine.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <video/s1d13xxxfb.h>
#include <asm/hardware/sa1111.h>
#include <asm/page.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include "generic.h"
/*
* HP Documentation referred in this file:
* http://www.jlime.com/downloads/development/docs/jornada7xx/jornada720.txt
*/
/* line 110 of HP's doc */
#define TUCR_VAL 0x20000400
/* memory space (line 52 of HP's doc) */
#define SA1111REGSTART 0x40000000
#define SA1111REGLEN 0x00002000
#define EPSONREGSTART 0x48000000
#define EPSONREGLEN 0x00100000
#define EPSONFBSTART 0x48200000
/* 512kB framebuffer */
#define EPSONFBLEN	(512 * 1024)
static struct s1d13xxxfb_regval s1d13xxxfb_initregs[] = {
/* line 344 of HP's doc */
{0x0001,0x00}, // Miscellaneous Register
{0x01FC,0x00}, // Display Mode Register
{0x0004,0x00}, // General IO Pins Configuration Register 0
{0x0005,0x00}, // General IO Pins Configuration Register 1
{0x0008,0x00}, // General IO Pins Control Register 0
{0x0009,0x00}, // General IO Pins Control Register 1
{0x0010,0x01}, // Memory Clock Configuration Register
{0x0014,0x11}, // LCD Pixel Clock Configuration Register
{0x0018,0x01}, // CRT/TV Pixel Clock Configuration Register
{0x001C,0x01}, // MediaPlug Clock Configuration Register
{0x001E,0x01}, // CPU To Memory Wait State Select Register
{0x0020,0x00}, // Memory Configuration Register
{0x0021,0x45}, // DRAM Refresh Rate Register
{0x002A,0x01}, // DRAM Timings Control Register 0
{0x002B,0x03}, // DRAM Timings Control Register 1
{0x0030,0x1c}, // Panel Type Register
{0x0031,0x00}, // MOD Rate Register
{0x0032,0x4F}, // LCD Horizontal Display Width Register
{0x0034,0x07}, // LCD Horizontal Non-Display Period Register
{0x0035,0x01}, // TFT FPLINE Start Position Register
{0x0036,0x0B}, // TFT FPLINE Pulse Width Register
{0x0038,0xEF}, // LCD Vertical Display Height Register 0
{0x0039,0x00}, // LCD Vertical Display Height Register 1
{0x003A,0x13}, // LCD Vertical Non-Display Period Register
{0x003B,0x0B}, // TFT FPFRAME Start Position Register
{0x003C,0x01}, // TFT FPFRAME Pulse Width Register
{0x0040,0x05}, // LCD Display Mode Register (2:4bpp,3:8bpp,5:16bpp)
{0x0041,0x00}, // LCD Miscellaneous Register
{0x0042,0x00}, // LCD Display Start Address Register 0
{0x0043,0x00}, // LCD Display Start Address Register 1
{0x0044,0x00}, // LCD Display Start Address Register 2
{0x0046,0x80}, // LCD Memory Address Offset Register 0
{0x0047,0x02}, // LCD Memory Address Offset Register 1
{0x0048,0x00}, // LCD Pixel Panning Register
{0x004A,0x00}, // LCD Display FIFO High Threshold Control Register
{0x004B,0x00}, // LCD Display FIFO Low Threshold Control Register
{0x0050,0x4F}, // CRT/TV Horizontal Display Width Register
{0x0052,0x13}, // CRT/TV Horizontal Non-Display Period Register
{0x0053,0x01}, // CRT/TV HRTC Start Position Register
{0x0054,0x0B}, // CRT/TV HRTC Pulse Width Register
{0x0056,0xDF}, // CRT/TV Vertical Display Height Register 0
{0x0057,0x01}, // CRT/TV Vertical Display Height Register 1
{0x0058,0x2B}, // CRT/TV Vertical Non-Display Period Register
{0x0059,0x09}, // CRT/TV VRTC Start Position Register
{0x005A,0x01}, // CRT/TV VRTC Pulse Width Register
{0x005B,0x10}, // TV Output Control Register
{0x0060,0x03}, // CRT/TV Display Mode Register (2:4bpp,3:8bpp,5:16bpp)
{0x0062,0x00}, // CRT/TV Display Start Address Register 0
{0x0063,0x00}, // CRT/TV Display Start Address Register 1
{0x0064,0x00}, // CRT/TV Display Start Address Register 2
{0x0066,0x40}, // CRT/TV Memory Address Offset Register 0
{0x0067,0x01}, // CRT/TV Memory Address Offset Register 1
{0x0068,0x00}, // CRT/TV Pixel Panning Register
{0x006A,0x00}, // CRT/TV Display FIFO High Threshold Control Register
{0x006B,0x00}, // CRT/TV Display FIFO Low Threshold Control Register
{0x0070,0x00}, // LCD Ink/Cursor Control Register
{0x0071,0x01}, // LCD Ink/Cursor Start Address Register
{0x0072,0x00}, // LCD Cursor X Position Register 0
{0x0073,0x00}, // LCD Cursor X Position Register 1
{0x0074,0x00}, // LCD Cursor Y Position Register 0
{0x0075,0x00}, // LCD Cursor Y Position Register 1
{0x0076,0x00}, // LCD Ink/Cursor Blue Color 0 Register
{0x0077,0x00}, // LCD Ink/Cursor Green Color 0 Register
{0x0078,0x00}, // LCD Ink/Cursor Red Color 0 Register
{0x007A,0x1F}, // LCD Ink/Cursor Blue Color 1 Register
{0x007B,0x3F}, // LCD Ink/Cursor Green Color 1 Register
{0x007C,0x1F}, // LCD Ink/Cursor Red Color 1 Register
{0x007E,0x00}, // LCD Ink/Cursor FIFO Threshold Register
{0x0080,0x00}, // CRT/TV Ink/Cursor Control Register
{0x0081,0x01}, // CRT/TV Ink/Cursor Start Address Register
{0x0082,0x00}, // CRT/TV Cursor X Position Register 0
{0x0083,0x00}, // CRT/TV Cursor X Position Register 1
{0x0084,0x00}, // CRT/TV Cursor Y Position Register 0
{0x0085,0x00}, // CRT/TV Cursor Y Position Register 1
{0x0086,0x00}, // CRT/TV Ink/Cursor Blue Color 0 Register
{0x0087,0x00}, // CRT/TV Ink/Cursor Green Color 0 Register
{0x0088,0x00}, // CRT/TV Ink/Cursor Red Color 0 Register
{0x008A,0x1F}, // CRT/TV Ink/Cursor Blue Color 1 Register
{0x008B,0x3F}, // CRT/TV Ink/Cursor Green Color 1 Register
{0x008C,0x1F}, // CRT/TV Ink/Cursor Red Color 1 Register
{0x008E,0x00}, // CRT/TV Ink/Cursor FIFO Threshold Register
{0x0100,0x00}, // BitBlt Control Register 0
{0x0101,0x00}, // BitBlt Control Register 1
{0x0102,0x00}, // BitBlt ROP Code/Color Expansion Register
{0x0103,0x00}, // BitBlt Operation Register
{0x0104,0x00}, // BitBlt Source Start Address Register 0
{0x0105,0x00}, // BitBlt Source Start Address Register 1
{0x0106,0x00}, // BitBlt Source Start Address Register 2
{0x0108,0x00}, // BitBlt Destination Start Address Register 0
{0x0109,0x00}, // BitBlt Destination Start Address Register 1
{0x010A,0x00}, // BitBlt Destination Start Address Register 2
{0x010C,0x00}, // BitBlt Memory Address Offset Register 0
{0x010D,0x00}, // BitBlt Memory Address Offset Register 1
{0x0110,0x00}, // BitBlt Width Register 0
{0x0111,0x00}, // BitBlt Width Register 1
{0x0112,0x00}, // BitBlt Height Register 0
{0x0113,0x00}, // BitBlt Height Register 1
{0x0114,0x00}, // BitBlt Background Color Register 0
{0x0115,0x00}, // BitBlt Background Color Register 1
{0x0118,0x00}, // BitBlt Foreground Color Register 0
{0x0119,0x00}, // BitBlt Foreground Color Register 1
{0x01E0,0x00}, // Look-Up Table Mode Register
{0x01E2,0x00}, // Look-Up Table Address Register
/* not sure, wouldn't like to mess with the driver */
{0x01E4,0x00}, // Look-Up Table Data Register
/* jornada doc says 0x00, but I trust the driver */
{0x01F0,0x10}, // Power Save Configuration Register
{0x01F1,0x00}, // Power Save Status Register
{0x01F4,0x00}, // CPU-to-Memory Access Watchdog Timer Register
{0x01FC,0x01}, // Display Mode Register(0x01:LCD, 0x02:CRT, 0x03:LCD&CRT)
};
static struct s1d13xxxfb_pdata s1d13xxxfb_data = {
.initregs = s1d13xxxfb_initregs,
.initregssize = ARRAY_SIZE(s1d13xxxfb_initregs),
.platform_init_video = NULL
};
static struct resource s1d13xxxfb_resources[] = {
[0] = DEFINE_RES_MEM(EPSONFBSTART, EPSONFBLEN),
[1] = DEFINE_RES_MEM(EPSONREGSTART, EPSONREGLEN),
};
static struct platform_device s1d13xxxfb_device = {
.name = S1D_DEVICENAME,
.id = 0,
.dev = {
.platform_data = &s1d13xxxfb_data,
},
.num_resources = ARRAY_SIZE(s1d13xxxfb_resources),
.resource = s1d13xxxfb_resources,
};
static struct gpiod_lookup_table jornada_pcmcia_gpiod_table = {
.dev_id = "1800",
.table = {
GPIO_LOOKUP("sa1111", 0, "s0-power", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("sa1111", 1, "s1-power", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("sa1111", 2, "s0-3v", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("sa1111", 3, "s1-3v", GPIO_ACTIVE_HIGH),
{ },
},
};
static struct resource sa1111_resources[] = {
[0] = DEFINE_RES_MEM(SA1111REGSTART, SA1111REGLEN),
[1] = DEFINE_RES_IRQ(IRQ_GPIO1),
};
static struct sa1111_platform_data sa1111_info = {
.disable_devs = SA1111_DEVID_PS2_MSE,
};
static u64 sa1111_dmamask = 0xffffffffUL;
static struct platform_device sa1111_device = {
.name = "sa1111",
.id = 0,
.dev = {
.dma_mask = &sa1111_dmamask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &sa1111_info,
},
.num_resources = ARRAY_SIZE(sa1111_resources),
.resource = sa1111_resources,
};
static struct platform_device jornada_ssp_device = {
.name = "jornada_ssp",
.id = -1,
};
static struct resource jornada_kbd_resources[] = {
DEFINE_RES_IRQ(IRQ_GPIO0),
};
static struct platform_device jornada_kbd_device = {
.name = "jornada720_kbd",
.id = -1,
.num_resources = ARRAY_SIZE(jornada_kbd_resources),
.resource = jornada_kbd_resources,
};
static struct gpiod_lookup_table jornada_ts_gpiod_table = {
.dev_id = "jornada_ts",
.table = {
GPIO_LOOKUP("gpio", 9, "penup", GPIO_ACTIVE_HIGH),
},
};
static struct platform_device jornada_ts_device = {
.name = "jornada_ts",
.id = -1,
};
static struct platform_device *devices[] __initdata = {
&sa1111_device,
&jornada_ssp_device,
&s1d13xxxfb_device,
&jornada_kbd_device,
&jornada_ts_device,
};
static int __init jornada720_init(void)
{
int ret = -ENODEV;
if (machine_is_jornada720()) {
		/* we use GPIO20 to drive the clock of UART 3 */
		GPDR |= GPIO_GPIO20;	/* configure GPIO20 as an output */
		TUCR = TUCR_VAL;
		GPSR = GPIO_GPIO20;	/* drive GPIO20 high */
		udelay(1);
		GPCR = GPIO_GPIO20;	/* drive GPIO20 low */
		udelay(1);
		GPSR = GPIO_GPIO20;	/* drive GPIO20 high again */
		udelay(20);		/* give the clock some time to settle */
gpiod_add_lookup_table(&jornada_ts_gpiod_table);
gpiod_add_lookup_table(&jornada_pcmcia_gpiod_table);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
}
return ret;
}
arch_initcall(jornada720_init);
static struct map_desc jornada720_io_desc[] __initdata = {
{ /* Epson registers */
.virtual = 0xf0000000,
.pfn = __phys_to_pfn(EPSONREGSTART),
.length = EPSONREGLEN,
.type = MT_DEVICE
}, { /* Epson frame buffer */
.virtual = 0xf1000000,
.pfn = __phys_to_pfn(EPSONFBSTART),
.length = EPSONFBLEN,
.type = MT_DEVICE
}
};
static void __init jornada720_map_io(void)
{
sa1100_map_io();
iotable_init(jornada720_io_desc, ARRAY_SIZE(jornada720_io_desc));
sa1100_register_uart(0, 3);
sa1100_register_uart(1, 1);
}
static struct mtd_partition jornada720_partitions[] = {
{
.name = "JORNADA720 boot firmware",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
.name = "JORNADA720 kernel",
.size = 0x000c0000,
.offset = 0x00040000,
}, {
.name = "JORNADA720 params",
.size = 0x00040000,
.offset = 0x00100000,
}, {
.name = "JORNADA720 initrd",
.size = 0x00100000,
.offset = 0x00140000,
}, {
.name = "JORNADA720 root cramfs",
.size = 0x00300000,
.offset = 0x00240000,
}, {
.name = "JORNADA720 usr cramfs",
.size = 0x00800000,
.offset = 0x00540000,
}, {
.name = "JORNADA720 usr local",
.size = 0, /* will expand to the end of the flash */
.offset = 0x00d00000,
}
};
static void jornada720_set_vpp(int vpp)
{
if (vpp)
/* enabling flash write (line 470 of HP's doc) */
PPSR |= PPC_LDD7;
else
/* disabling flash write (line 470 of HP's doc) */
PPSR &= ~PPC_LDD7;
PPDR |= PPC_LDD7;
}
static struct flash_platform_data jornada720_flash_data = {
.map_name = "cfi_probe",
.set_vpp = jornada720_set_vpp,
.parts = jornada720_partitions,
.nr_parts = ARRAY_SIZE(jornada720_partitions),
};
static struct resource jornada720_flash_resource =
DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M);
static void __init jornada720_mach_init(void)
{
sa11x0_register_mtd(&jornada720_flash_data, &jornada720_flash_resource, 1);
}
MACHINE_START(JORNADA720, "HP Jornada 720")
/* Maintainer: Kristoffer Ericson <[email protected]> */
.atag_offset = 0x100,
.map_io = jornada720_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
.init_time = sa1100_timer_init,
.init_machine = jornada720_mach_init,
.init_late = sa11x0_init_late,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
.restart = sa11x0_restart,
MACHINE_END
| linux-master | arch/arm/mach-sa1100/jornada720.c |
/*
* linux/arch/arm/mach-sa1100/collie.c
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* This file contains all Collie-specific tweaks.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ChangeLog:
* 2006 Pavel Machek <[email protected]>
* 03-06-2004 John Lenz <[email protected]>
* 06-04-2002 Chris Larson <[email protected]>
* 04-16-2001 Lineo Japan,Inc. ...
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/platform_device.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/timer.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/power/gpio-charger.h>
#include <video/sa1100fb.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <mach/collie.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/hardware/scoop.h>
#include <asm/mach/sharpsl_param.h>
#include <asm/hardware/locomo.h>
#include <linux/platform_data/mfd-mcp-sa11x0.h>
#include <mach/irqs.h>
#include "generic.h"
static struct resource collie_scoop_resources[] = {
[0] = DEFINE_RES_MEM(0x40800000, SZ_4K),
};
static struct scoop_config collie_scoop_setup = {
.io_dir = COLLIE_SCOOP_IO_DIR,
.io_out = COLLIE_SCOOP_IO_OUT,
.gpio_base = COLLIE_SCOOP_GPIO_BASE,
};
struct platform_device colliescoop_device = {
.name = "sharp-scoop",
.id = -1,
.dev = {
.platform_data = &collie_scoop_setup,
},
.num_resources = ARRAY_SIZE(collie_scoop_resources),
.resource = collie_scoop_resources,
};
static struct scoop_pcmcia_dev collie_pcmcia_scoop[] = {
{
.dev = &colliescoop_device.dev,
.irq = COLLIE_IRQ_GPIO_CF_IRQ,
.cd_irq = COLLIE_IRQ_GPIO_CF_CD,
.cd_irq_str = "PCMCIA0 CD",
},
};
static struct scoop_pcmcia_config collie_pcmcia_config = {
.devs = &collie_pcmcia_scoop[0],
.num_devs = 1,
};
static struct ucb1x00_plat_data collie_ucb1x00_data = {
.gpio_base = COLLIE_TC35143_GPIO_BASE,
};
static struct mcp_plat_data collie_mcp_data = {
.mccr0 = MCCR0_ADM | MCCR0_ExtClk,
.sclk_rate = 9216000,
.codec_pdata = &collie_ucb1x00_data,
};
/* Battery management GPIOs */
static struct gpiod_lookup_table collie_battery_gpiod_table = {
/* the MCP codec mcp0 has the ucb1x00 as attached device */
.dev_id = "ucb1x00",
.table = {
/* This is found on the main GPIO on the SA1100 */
GPIO_LOOKUP("gpio", COLLIE_GPIO_CO,
"main battery full", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio", COLLIE_GPIO_MAIN_BAT_LOW,
"main battery low", GPIO_ACTIVE_HIGH),
/*
* This is GPIO 0 on the Scoop expander, which is registered
* from common/scoop.c with this gpio chip label.
*/
GPIO_LOOKUP("sharp-scoop", 0,
"main charge on", GPIO_ACTIVE_HIGH),
{ },
},
};
/*
* Collie AC IN
*/
static struct gpiod_lookup_table collie_power_gpiod_table = {
.dev_id = "gpio-charger",
.table = {
GPIO_LOOKUP("gpio", COLLIE_GPIO_AC_IN,
NULL, GPIO_ACTIVE_HIGH),
{ },
},
};
static char *collie_ac_supplied_to[] = {
"main-battery",
"backup-battery",
};
static struct gpio_charger_platform_data collie_power_data = {
.name = "charger",
.type = POWER_SUPPLY_TYPE_MAINS,
.supplied_to = collie_ac_supplied_to,
.num_supplicants = ARRAY_SIZE(collie_ac_supplied_to),
};
static struct platform_device collie_power_device = {
.name = "gpio-charger",
.id = -1,
.dev.platform_data = &collie_power_data,
};
#ifdef CONFIG_SHARP_LOCOMO
/*
* low-level UART features.
*/
struct platform_device collie_locomo_device;
static void collie_uart_set_mctrl(struct uart_port *port, u_int mctrl)
{
if (mctrl & TIOCM_RTS)
locomo_gpio_write(&collie_locomo_device.dev, LOCOMO_GPIO_RTS, 0);
else
locomo_gpio_write(&collie_locomo_device.dev, LOCOMO_GPIO_RTS, 1);
if (mctrl & TIOCM_DTR)
locomo_gpio_write(&collie_locomo_device.dev, LOCOMO_GPIO_DTR, 0);
else
locomo_gpio_write(&collie_locomo_device.dev, LOCOMO_GPIO_DTR, 1);
}
static u_int collie_uart_get_mctrl(struct uart_port *port)
{
int ret = TIOCM_CD;
unsigned int r;
	r = locomo_gpio_read_output(&collie_locomo_device.dev,
				    LOCOMO_GPIO_CTS | LOCOMO_GPIO_DSR);
if (r == -ENODEV)
return ret;
if (r & LOCOMO_GPIO_CTS)
ret |= TIOCM_CTS;
if (r & LOCOMO_GPIO_DSR)
ret |= TIOCM_DSR;
return ret;
}
static struct sa1100_port_fns collie_port_fns __initdata = {
.set_mctrl = collie_uart_set_mctrl,
.get_mctrl = collie_uart_get_mctrl,
};
static int collie_uart_probe(struct locomo_dev *dev)
{
return 0;
}
static struct locomo_driver collie_uart_driver = {
.drv = {
.name = "collie_uart",
},
.devid = LOCOMO_DEVID_UART,
.probe = collie_uart_probe,
};
static int __init collie_uart_init(void)
{
return locomo_driver_register(&collie_uart_driver);
}
device_initcall(collie_uart_init);
#endif
static struct resource locomo_resources[] = {
[0] = DEFINE_RES_MEM(0x40000000, SZ_8K),
[1] = DEFINE_RES_IRQ(IRQ_GPIO25),
};
static struct locomo_platform_data locomo_info = {
.irq_base = IRQ_BOARD_START,
};
struct platform_device collie_locomo_device = {
.name = "locomo",
.id = 0,
.dev = {
.platform_data = &locomo_info,
},
.num_resources = ARRAY_SIZE(locomo_resources),
.resource = locomo_resources,
};
static struct gpio_keys_button collie_gpio_keys[] = {
{
.type = EV_PWR,
.code = KEY_RESERVED,
.gpio = COLLIE_GPIO_ON_KEY,
.desc = "On key",
.wakeup = 1,
.active_low = 1,
},
{
.type = EV_PWR,
.code = KEY_WAKEUP,
.gpio = COLLIE_GPIO_WAKEUP,
.desc = "Sync",
.wakeup = 1,
.active_low = 1,
},
};
static struct gpio_keys_platform_data collie_gpio_keys_data = {
.buttons = collie_gpio_keys,
.nbuttons = ARRAY_SIZE(collie_gpio_keys),
};
static struct platform_device collie_gpio_keys_device = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &collie_gpio_keys_data,
},
};
static struct platform_device *devices[] __initdata = {
&collie_locomo_device,
&colliescoop_device,
&collie_power_device,
&collie_gpio_keys_device,
};
static struct mtd_partition collie_partitions[] = {
{
.name = "bootloader",
.offset = 0,
.size = 0x000C0000,
.mask_flags = MTD_WRITEABLE
}, {
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = 0x00100000,
}, {
.name = "rootfs",
.offset = MTDPART_OFS_APPEND,
.size = 0x00e20000,
}, {
.name = "bootblock",
.offset = MTDPART_OFS_APPEND,
.size = 0x00020000,
.mask_flags = MTD_WRITEABLE
}
};
static int collie_flash_init(void)
{
int rc = gpio_request(COLLIE_GPIO_VPEN, "flash Vpp enable");
if (rc)
return rc;
rc = gpio_direction_output(COLLIE_GPIO_VPEN, 1);
if (rc)
gpio_free(COLLIE_GPIO_VPEN);
return rc;
}
static void collie_set_vpp(int vpp)
{
gpio_set_value(COLLIE_GPIO_VPEN, vpp);
}
static void collie_flash_exit(void)
{
gpio_free(COLLIE_GPIO_VPEN);
}
static struct flash_platform_data collie_flash_data = {
.map_name = "cfi_probe",
.init = collie_flash_init,
.set_vpp = collie_set_vpp,
.exit = collie_flash_exit,
.parts = collie_partitions,
.nr_parts = ARRAY_SIZE(collie_partitions),
};
static struct resource collie_flash_resources[] = {
DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M),
};
static struct sa1100fb_mach_info collie_lcd_info = {
.pixclock = 171521, .bpp = 16,
.xres = 320, .yres = 240,
.hsync_len = 5, .vsync_len = 1,
.left_margin = 11, .upper_margin = 2,
.right_margin = 30, .lower_margin = 0,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
.lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
#ifdef CONFIG_BACKLIGHT_LOCOMO
.lcd_power = locomolcd_power
#endif
};
static void __init collie_init(void)
{
int ret = 0;
/* cpu initialize */
GAFR = GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SSP_CLK |
GPIO_MCP_CLK | GPIO_32_768kHz;
GPDR = GPIO_LDD8 | GPIO_LDD9 | GPIO_LDD10 | GPIO_LDD11 | GPIO_LDD12 |
GPIO_LDD13 | GPIO_LDD14 | GPIO_LDD15 | GPIO_SSP_TXD |
GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SDLC_SCLK |
_COLLIE_GPIO_UCB1x00_RESET | _COLLIE_GPIO_nMIC_ON |
_COLLIE_GPIO_nREMOCON_ON | GPIO_32_768kHz;
PPDR = PPC_LDD0 | PPC_LDD1 | PPC_LDD2 | PPC_LDD3 | PPC_LDD4 | PPC_LDD5 |
PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS |
PPC_TXD1 | PPC_TXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM;
PWER = 0;
PGSR = _COLLIE_GPIO_nREMOCON_ON;
PSDR = PPC_RXD1 | PPC_RXD2 | PPC_RXD3 | PPC_RXD4;
PCFR = PCFR_OPDE;
GPSR |= _COLLIE_GPIO_UCB1x00_RESET;
sa11x0_ppc_configure_mcp();
platform_scoop_config = &collie_pcmcia_config;
gpiod_add_lookup_table(&collie_power_gpiod_table);
gpiod_add_lookup_table(&collie_battery_gpiod_table);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
if (ret) {
		printk(KERN_WARNING "collie: unable to register platform devices\n");
}
sa11x0_register_lcd(&collie_lcd_info);
sa11x0_register_mtd(&collie_flash_data, collie_flash_resources,
ARRAY_SIZE(collie_flash_resources));
sa11x0_register_mcp(&collie_mcp_data);
sharpsl_save_param();
}
static struct map_desc collie_io_desc[] __initdata = {
{ /* 32M main flash (cs0) */
.virtual = 0xe8000000,
.pfn = __phys_to_pfn(0x00000000),
.length = 0x02000000,
.type = MT_DEVICE
}, { /* 32M boot flash (cs1) */
.virtual = 0xea000000,
.pfn = __phys_to_pfn(0x08000000),
.length = 0x02000000,
.type = MT_DEVICE
}
};
static void __init collie_map_io(void)
{
sa1100_map_io();
iotable_init(collie_io_desc, ARRAY_SIZE(collie_io_desc));
#ifdef CONFIG_SHARP_LOCOMO
sa1100_register_uart_fns(&collie_port_fns);
#endif
sa1100_register_uart(0, 3);
sa1100_register_uart(1, 1);
}
MACHINE_START(COLLIE, "Sharp-Collie")
.map_io = collie_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
.init_time = sa1100_timer_init,
.init_machine = collie_init,
.init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
| linux-master | arch/arm/mach-sa1100/collie.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for Compaq iPAQ H3100 and H3600 handheld computers (common code)
*
* Copyright (c) 2000,1 Compaq Computer Corporation. (Author: Jamey Hicks)
* Copyright (c) 2009 Dmitry Artamonow <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/gpio/machine.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_data/gpio-htc-egpio.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <mach/h3xxx.h>
#include <mach/irqs.h>
#include "generic.h"
/*
* H3xxx flash support
*/
static struct mtd_partition h3xxx_partitions[] = {
{
.name = "H3XXX boot firmware",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
.name = "H3XXX rootfs",
.size = MTDPART_SIZ_FULL,
.offset = 0x00040000,
}
};
static void h3xxx_set_vpp(int vpp)
{
gpio_set_value(H3XXX_EGPIO_VPP_ON, vpp);
}
static int h3xxx_flash_init(void)
{
int err = gpio_request(H3XXX_EGPIO_VPP_ON, "Flash Vpp");
if (err) {
pr_err("%s: can't request H3XXX_EGPIO_VPP_ON\n", __func__);
return err;
}
err = gpio_direction_output(H3XXX_EGPIO_VPP_ON, 0);
if (err)
gpio_free(H3XXX_EGPIO_VPP_ON);
return err;
}
static void h3xxx_flash_exit(void)
{
gpio_free(H3XXX_EGPIO_VPP_ON);
}
static struct flash_platform_data h3xxx_flash_data = {
.map_name = "cfi_probe",
.set_vpp = h3xxx_set_vpp,
.init = h3xxx_flash_init,
.exit = h3xxx_flash_exit,
.parts = h3xxx_partitions,
.nr_parts = ARRAY_SIZE(h3xxx_partitions),
};
static struct resource h3xxx_flash_resource =
DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M);
/*
* H3xxx uart support
*/
static void h3xxx_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
{
if (port->mapbase == _Ser3UTCR0) {
if (!gpio_request(H3XXX_EGPIO_RS232_ON, "RS232 transceiver")) {
gpio_direction_output(H3XXX_EGPIO_RS232_ON, !state);
gpio_free(H3XXX_EGPIO_RS232_ON);
} else {
pr_err("%s: can't request H3XXX_EGPIO_RS232_ON\n",
__func__);
}
}
}
/*
* Enable/Disable wake up events for this serial port.
* Obviously, we only support this on the normal COM port.
*/
static int h3xxx_uart_set_wake(struct uart_port *port, u_int enable)
{
int err = -EINVAL;
if (port->mapbase == _Ser3UTCR0) {
if (enable)
PWER |= PWER_GPIO23 | PWER_GPIO25; /* DCD and CTS */
else
PWER &= ~(PWER_GPIO23 | PWER_GPIO25); /* DCD and CTS */
err = 0;
}
return err;
}
static struct sa1100_port_fns h3xxx_port_fns __initdata = {
.pm = h3xxx_uart_pm,
.set_wake = h3xxx_uart_set_wake,
};
static struct gpiod_lookup_table h3xxx_uart3_gpio_table = {
.dev_id = "sa11x0-uart.3",
.table = {
GPIO_LOOKUP("gpio", H3XXX_GPIO_COM_DCD, "dcd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("gpio", H3XXX_GPIO_COM_CTS, "cts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("gpio", H3XXX_GPIO_COM_RTS, "rts", GPIO_ACTIVE_LOW),
{ },
},
};
/*
* EGPIO
*/
static struct resource egpio_resources[] = {
[0] = DEFINE_RES_MEM(H3600_EGPIO_PHYS, 0x4),
};
static struct htc_egpio_chip egpio_chips[] = {
[0] = {
.reg_start = 0,
.gpio_base = H3XXX_EGPIO_BASE,
.num_gpios = 16,
.direction = HTC_EGPIO_OUTPUT,
.initial_values = 0x0080, /* H3XXX_EGPIO_RS232_ON */
},
};
static struct htc_egpio_platform_data egpio_info = {
.reg_width = 16,
.bus_width = 16,
.chip = egpio_chips,
.num_chips = ARRAY_SIZE(egpio_chips),
};
static struct platform_device h3xxx_egpio = {
.name = "htc-egpio",
.id = -1,
.resource = egpio_resources,
.num_resources = ARRAY_SIZE(egpio_resources),
.dev = {
.platform_data = &egpio_info,
},
};
/*
* GPIO keys
*/
static struct gpio_keys_button h3xxx_button_table[] = {
{
.code = KEY_POWER,
.gpio = H3XXX_GPIO_PWR_BUTTON,
.desc = "Power Button",
.active_low = 1,
.type = EV_KEY,
.wakeup = 1,
}, {
.code = KEY_ENTER,
.gpio = H3XXX_GPIO_ACTION_BUTTON,
.active_low = 1,
.desc = "Action button",
.type = EV_KEY,
.wakeup = 0,
},
};
static struct gpio_keys_platform_data h3xxx_keys_data = {
.buttons = h3xxx_button_table,
.nbuttons = ARRAY_SIZE(h3xxx_button_table),
};
static struct platform_device h3xxx_keys = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &h3xxx_keys_data,
},
};
static struct resource h3xxx_micro_resources[] = {
DEFINE_RES_MEM(0x80010000, SZ_4K),
DEFINE_RES_MEM(0x80020000, SZ_4K),
DEFINE_RES_IRQ(IRQ_Ser1UART),
};
struct platform_device h3xxx_micro_asic = {
.name = "ipaq-h3xxx-micro",
.id = -1,
.resource = h3xxx_micro_resources,
.num_resources = ARRAY_SIZE(h3xxx_micro_resources),
};
static struct platform_device *h3xxx_devices[] = {
&h3xxx_egpio,
&h3xxx_keys,
&h3xxx_micro_asic,
};
static struct gpiod_lookup_table h3xxx_pcmcia_gpio_table = {
.dev_id = "sa11x0-pcmcia",
.table = {
GPIO_LOOKUP("gpio", H3XXX_GPIO_PCMCIA_CD0,
"pcmcia0-detect", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("gpio", H3XXX_GPIO_PCMCIA_IRQ0,
"pcmcia0-ready", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio", H3XXX_GPIO_PCMCIA_CD1,
"pcmcia1-detect", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("gpio", H3XXX_GPIO_PCMCIA_IRQ1,
"pcmcia1-ready", GPIO_ACTIVE_HIGH),
{ },
},
};
void __init h3xxx_mach_init(void)
{
gpiod_add_lookup_table(&h3xxx_pcmcia_gpio_table);
gpiod_add_lookup_table(&h3xxx_uart3_gpio_table);
sa1100_register_uart_fns(&h3xxx_port_fns);
sa11x0_register_mtd(&h3xxx_flash_data, &h3xxx_flash_resource, 1);
platform_add_devices(h3xxx_devices, ARRAY_SIZE(h3xxx_devices));
}
static struct map_desc h3600_io_desc[] __initdata = {
{ /* static memory bank 2 CS#2 */
.virtual = H3600_BANK_2_VIRT,
.pfn = __phys_to_pfn(SA1100_CS2_PHYS),
.length = 0x02800000,
.type = MT_DEVICE
}, { /* static memory bank 4 CS#4 */
.virtual = H3600_BANK_4_VIRT,
.pfn = __phys_to_pfn(SA1100_CS4_PHYS),
.length = 0x00800000,
.type = MT_DEVICE
}, { /* EGPIO 0 CS#5 */
.virtual = H3600_EGPIO_VIRT,
.pfn = __phys_to_pfn(H3600_EGPIO_PHYS),
.length = 0x01000000,
.type = MT_DEVICE
}
};
/*
* Common map_io initialization
*/
void __init h3xxx_map_io(void)
{
sa1100_map_io();
iotable_init(h3600_io_desc, ARRAY_SIZE(h3600_io_desc));
sa1100_register_uart(0, 3); /* Common serial port */
// sa1100_register_uart(1, 1); /* Microcontroller on 3100/3600 */
/* Ensure those pins are outputs and driving low */
PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
/* Configure suspend conditions */
PGSR = 0;
PCFR = PCFR_OPDE;
PSDR = 0;
GPCR = 0x0fffffff; /* All outputs are set low by default */
GPDR = 0; /* Configure all GPIOs as input */
}
| linux-master | arch/arm/mach-sa1100/h3xxx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for Compaq iPAQ H3600 handheld computer
*
* Copyright (c) 2000,1 Compaq Computer Corporation. (Author: Jamey Hicks)
* Copyright (c) 2009 Dmitry Artamonow <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <video/sa1100fb.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/h3xxx.h>
#include <mach/irqs.h>
#include "generic.h"
/*
* helper for sa1100fb
*/
static struct gpio h3600_lcd_gpio[] = {
{ H3XXX_EGPIO_LCD_ON, GPIOF_OUT_INIT_LOW, "LCD power" },
{ H3600_EGPIO_LCD_PCI, GPIOF_OUT_INIT_LOW, "LCD control" },
{ H3600_EGPIO_LCD_5V_ON, GPIOF_OUT_INIT_LOW, "LCD 5v" },
{ H3600_EGPIO_LVDD_ON, GPIOF_OUT_INIT_LOW, "LCD 9v/-6.5v" },
};
static bool h3600_lcd_request(void)
{
static bool h3600_lcd_ok;
int rc;
if (h3600_lcd_ok)
return true;
rc = gpio_request_array(h3600_lcd_gpio, ARRAY_SIZE(h3600_lcd_gpio));
if (rc)
pr_err("%s: can't request GPIOs\n", __func__);
else
h3600_lcd_ok = true;
return h3600_lcd_ok;
}
static void h3600_lcd_power(int enable)
{
if (!h3600_lcd_request())
return;
gpio_direction_output(H3XXX_EGPIO_LCD_ON, enable);
gpio_direction_output(H3600_EGPIO_LCD_PCI, enable);
gpio_direction_output(H3600_EGPIO_LCD_5V_ON, enable);
gpio_direction_output(H3600_EGPIO_LVDD_ON, enable);
}
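/*
 * 4 bits per colour component within each 16-bit pixel (red at bit 12,
 * green at bit 7, blue at bit 1); the remaining bits are not used.
 */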
static const struct sa1100fb_rgb h3600_rgb_16 = {
.red = { .offset = 12, .length = 4, },
.green = { .offset = 7, .length = 4, },
.blue = { .offset = 1, .length = 4, },
.transp = { .offset = 0, .length = 0, },
};
static struct sa1100fb_mach_info h3600_lcd_info = {
.pixclock = 174757, .bpp = 16,
.xres = 320, .yres = 240,
.hsync_len = 3, .vsync_len = 3,
.left_margin = 12, .upper_margin = 10,
.right_margin = 17, .lower_margin = 1,
.cmap_static = 1,
.lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
.lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
.rgb[RGB_16] = &h3600_rgb_16,
.lcd_power = h3600_lcd_power,
};
static void __init h3600_map_io(void)
{
h3xxx_map_io();
}
static void __init h3600_mach_init(void)
{
h3xxx_mach_init();
sa11x0_register_lcd(&h3600_lcd_info);
}
MACHINE_START(H3600, "Compaq iPAQ H3600")
.atag_offset = 0x100,
.map_io = h3600_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
.init_time = sa1100_timer_init,
.init_machine = h3600_mach_init,
.init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
| linux-master | arch/arm/mach-sa1100/h3600.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/arm/mach-sa1100/neponset.c
*/
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/gpio-reg.h>
#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/smc91x.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/hardware/sa1111.h>
#include <linux/sizes.h>
#include <mach/hardware.h>
#include <mach/assabet.h>
#include <mach/neponset.h>
#include <mach/irqs.h>
#define NEP_IRQ_SMC91X 0
#define NEP_IRQ_USAR 1
#define NEP_IRQ_SA1111 2
#define NEP_IRQ_NR 3
#define WHOAMI 0x00
#define LEDS 0x10
#define SWPK 0x20
#define IRR 0x24
#define KP_Y_IN 0x80
#define KP_X_OUT 0x90
#define NCR_0 0xa0
#define MDM_CTL_0 0xb0
#define MDM_CTL_1 0xb4
#define AUD_CTL 0xc0
#define IRR_ETHERNET (1 << 0)
#define IRR_USAR (1 << 1)
#define IRR_SA1111 (1 << 2)
#define NCR_NGPIO 7
#define MDM_CTL0_NGPIO 4
#define MDM_CTL1_NGPIO 6
#define AUD_NGPIO 2
extern void sa1110_mb_disable(void);
#define to_neponset_gpio_chip(x) container_of(x, struct neponset_gpio_chip, gc)
static const char *neponset_ncr_names[] = {
"gp01_off", "tp_power", "ms_power", "enet_osc",
"spi_kb_wk_up", "a0vpp", "a1vpp"
};
static const char *neponset_mdmctl0_names[] = {
"rts3", "dtr3", "rts1", "dtr1",
};
static const char *neponset_mdmctl1_names[] = {
"cts3", "dsr3", "dcd3", "cts1", "dsr1", "dcd1"
};
static const char *neponset_aud_names[] = {
"sel_1341", "mute_1341",
};
struct neponset_drvdata {
void __iomem *base;
struct platform_device *sa1111;
struct platform_device *smc91x;
unsigned irq_base;
struct gpio_chip *gpio[4];
};
static struct gpiod_lookup_table neponset_uart1_gpio_table = {
.dev_id = "sa11x0-uart.1",
.table = {
GPIO_LOOKUP("neponset-mdm-ctl0", 2, "rts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl0", 3, "dtr", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl1", 3, "cts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl1", 4, "dsr", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl1", 5, "dcd", GPIO_ACTIVE_LOW),
{ },
},
};
static struct gpiod_lookup_table neponset_uart3_gpio_table = {
.dev_id = "sa11x0-uart.3",
.table = {
GPIO_LOOKUP("neponset-mdm-ctl0", 0, "rts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl0", 1, "dtr", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl1", 0, "cts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl1", 1, "dsr", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("neponset-mdm-ctl1", 2, "dcd", GPIO_ACTIVE_LOW),
{ },
},
};
static struct gpiod_lookup_table neponset_pcmcia_table = {
.dev_id = "1800",
.table = {
GPIO_LOOKUP("sa1111", 1, "a0vcc", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("sa1111", 0, "a1vcc", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("neponset-ncr", 5, "a0vpp", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("neponset-ncr", 6, "a1vpp", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("sa1111", 2, "b0vcc", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("sa1111", 3, "b1vcc", GPIO_ACTIVE_HIGH),
{ },
},
};
static struct neponset_drvdata *nep;
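/*
 * Read-modify-write helper for the Neponset control register: set the bits
 * selected by 'mask' to the corresponding bits of 'val', going through the
 * gpio-reg chip registered over NCR_0.
 */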
void neponset_ncr_frob(unsigned int mask, unsigned int val)
{
struct neponset_drvdata *n = nep;
unsigned long m = mask, v = val;
if (nep)
n->gpio[0]->set_multiple(n->gpio[0], &m, &v);
else
WARN(1, "nep unset\n");
}
EXPORT_SYMBOL(neponset_ncr_frob);
/*
* Install handler for Neponset IRQ. Note that we have to loop here
* since the ETHERNET and USAR IRQs are level based, and we need to
* ensure that the IRQ signal is deasserted before returning. This
* is rather unfortunate.
*/
static void neponset_irq_handler(struct irq_desc *desc)
{
struct neponset_drvdata *d = irq_desc_get_handler_data(desc);
unsigned int irr;
while (1) {
/*
* Acknowledge the parent IRQ.
*/
desc->irq_data.chip->irq_ack(&desc->irq_data);
/*
* Read the interrupt reason register. Let's have all
* active IRQ bits high. Note: there is a typo in the
* Neponset user's guide for the SA1111 IRR level.
*/
irr = readb_relaxed(d->base + IRR);
irr ^= IRR_ETHERNET | IRR_USAR;
if ((irr & (IRR_ETHERNET | IRR_USAR | IRR_SA1111)) == 0)
break;
/*
* Since there is no individual mask, we have to
* mask the parent IRQ. This is safe, since we'll
* recheck the register for any pending IRQs.
*/
if (irr & (IRR_ETHERNET | IRR_USAR)) {
desc->irq_data.chip->irq_mask(&desc->irq_data);
/*
* Ack the interrupt now to prevent re-entering
* this neponset handler. Again, this is safe
* since we'll check the IRR register prior to
* leaving.
*/
desc->irq_data.chip->irq_ack(&desc->irq_data);
if (irr & IRR_ETHERNET)
generic_handle_irq(d->irq_base + NEP_IRQ_SMC91X);
if (irr & IRR_USAR)
generic_handle_irq(d->irq_base + NEP_IRQ_USAR);
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
if (irr & IRR_SA1111)
generic_handle_irq(d->irq_base + NEP_IRQ_SA1111);
}
}
/* Yes, we really do not have any kind of masking or unmasking */
static void nochip_noop(struct irq_data *irq)
{
}
static struct irq_chip nochip = {
.name = "neponset",
.irq_ack = nochip_noop,
.irq_mask = nochip_noop,
.irq_unmask = nochip_noop,
};
static int neponset_init_gpio(struct gpio_chip **gcp,
struct device *dev, const char *label, void __iomem *reg,
unsigned num, bool in, const char *const * names)
{
struct gpio_chip *gc;
gc = gpio_reg_init(dev, reg, -1, num, label, in ? 0xffffffff : 0,
readl_relaxed(reg), names, NULL, NULL);
if (IS_ERR(gc))
return PTR_ERR(gc);
*gcp = gc;
return 0;
}
static struct sa1111_platform_data sa1111_info = {
.disable_devs = SA1111_DEVID_PS2_MSE,
};
static int neponset_probe(struct platform_device *dev)
{
struct neponset_drvdata *d;
struct resource *nep_res, *sa1111_res, *smc91x_res;
struct resource sa1111_resources[] = {
DEFINE_RES_MEM(0x40000000, SZ_8K),
{ .flags = IORESOURCE_IRQ },
};
struct platform_device_info sa1111_devinfo = {
.parent = &dev->dev,
.name = "sa1111",
.id = 0,
.res = sa1111_resources,
.num_res = ARRAY_SIZE(sa1111_resources),
.data = &sa1111_info,
.size_data = sizeof(sa1111_info),
.dma_mask = 0xffffffffUL,
};
struct resource smc91x_resources[] = {
DEFINE_RES_MEM_NAMED(SA1100_CS3_PHYS,
0x02000000, "smc91x-regs"),
DEFINE_RES_MEM_NAMED(SA1100_CS3_PHYS + 0x02000000,
0x02000000, "smc91x-attrib"),
{ .flags = IORESOURCE_IRQ },
};
struct smc91x_platdata smc91x_platdata = {
.flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
};
struct platform_device_info smc91x_devinfo = {
.parent = &dev->dev,
.name = "smc91x",
.id = 0,
.res = smc91x_resources,
.num_res = ARRAY_SIZE(smc91x_resources),
.data = &smc91x_platdata,
.size_data = sizeof(smc91x_platdata),
};
int ret, irq;
if (nep)
return -EBUSY;
irq = ret = platform_get_irq(dev, 0);
if (ret < 0)
goto err_alloc;
nep_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
smc91x_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
sa1111_res = platform_get_resource(dev, IORESOURCE_MEM, 2);
if (!nep_res || !smc91x_res || !sa1111_res) {
ret = -ENXIO;
goto err_alloc;
}
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
ret = -ENOMEM;
goto err_alloc;
}
d->base = ioremap(nep_res->start, SZ_4K);
if (!d->base) {
ret = -ENOMEM;
goto err_ioremap;
}
if (readb_relaxed(d->base + WHOAMI) != 0x11) {
dev_warn(&dev->dev, "Neponset board detected, but wrong ID: %02x\n",
readb_relaxed(d->base + WHOAMI));
ret = -ENODEV;
goto err_id;
}
ret = irq_alloc_descs(-1, IRQ_BOARD_START, NEP_IRQ_NR, -1);
if (ret <= 0) {
dev_err(&dev->dev, "unable to allocate %u irqs: %d\n",
NEP_IRQ_NR, ret);
if (ret == 0)
ret = -ENOMEM;
goto err_irq_alloc;
}
d->irq_base = ret;
irq_set_chip_and_handler(d->irq_base + NEP_IRQ_SMC91X, &nochip,
handle_simple_irq);
irq_clear_status_flags(d->irq_base + NEP_IRQ_SMC91X, IRQ_NOREQUEST | IRQ_NOPROBE);
irq_set_chip_and_handler(d->irq_base + NEP_IRQ_USAR, &nochip,
handle_simple_irq);
irq_clear_status_flags(d->irq_base + NEP_IRQ_USAR, IRQ_NOREQUEST | IRQ_NOPROBE);
irq_set_chip(d->irq_base + NEP_IRQ_SA1111, &nochip);
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
irq_set_chained_handler_and_data(irq, neponset_irq_handler, d);
/* Disable GPIO 0/1 drivers so the buttons work on the Assabet */
writeb_relaxed(NCR_GP01_OFF, d->base + NCR_0);
neponset_init_gpio(&d->gpio[0], &dev->dev, "neponset-ncr",
d->base + NCR_0, NCR_NGPIO, false,
neponset_ncr_names);
neponset_init_gpio(&d->gpio[1], &dev->dev, "neponset-mdm-ctl0",
d->base + MDM_CTL_0, MDM_CTL0_NGPIO, false,
neponset_mdmctl0_names);
neponset_init_gpio(&d->gpio[2], &dev->dev, "neponset-mdm-ctl1",
d->base + MDM_CTL_1, MDM_CTL1_NGPIO, true,
neponset_mdmctl1_names);
neponset_init_gpio(&d->gpio[3], &dev->dev, "neponset-aud-ctl",
d->base + AUD_CTL, AUD_NGPIO, false,
neponset_aud_names);
gpiod_add_lookup_table(&neponset_uart1_gpio_table);
gpiod_add_lookup_table(&neponset_uart3_gpio_table);
gpiod_add_lookup_table(&neponset_pcmcia_table);
/*
* We would set IRQ_GPIO25 to be a wake-up IRQ, but unfortunately
* something on the Neponset activates this IRQ on sleep (eth?)
*/
#if 0
enable_irq_wake(irq);
#endif
dev_info(&dev->dev, "Neponset daughter board, providing IRQ%u-%u\n",
d->irq_base, d->irq_base + NEP_IRQ_NR - 1);
nep = d;
/* Ensure that the memory bus request/grant signals are setup */
sa1110_mb_disable();
sa1111_resources[0].parent = sa1111_res;
sa1111_resources[1].start = d->irq_base + NEP_IRQ_SA1111;
sa1111_resources[1].end = d->irq_base + NEP_IRQ_SA1111;
d->sa1111 = platform_device_register_full(&sa1111_devinfo);
smc91x_resources[0].parent = smc91x_res;
smc91x_resources[1].parent = smc91x_res;
smc91x_resources[2].start = d->irq_base + NEP_IRQ_SMC91X;
smc91x_resources[2].end = d->irq_base + NEP_IRQ_SMC91X;
d->smc91x = platform_device_register_full(&smc91x_devinfo);
platform_set_drvdata(dev, d);
return 0;
err_irq_alloc:
err_id:
iounmap(d->base);
err_ioremap:
kfree(d);
err_alloc:
return ret;
}
static void neponset_remove(struct platform_device *dev)
{
struct neponset_drvdata *d = platform_get_drvdata(dev);
int irq = platform_get_irq(dev, 0);
if (!IS_ERR(d->sa1111))
platform_device_unregister(d->sa1111);
if (!IS_ERR(d->smc91x))
platform_device_unregister(d->smc91x);
gpiod_remove_lookup_table(&neponset_pcmcia_table);
gpiod_remove_lookup_table(&neponset_uart3_gpio_table);
gpiod_remove_lookup_table(&neponset_uart1_gpio_table);
irq_set_chained_handler(irq, NULL);
irq_free_descs(d->irq_base, NEP_IRQ_NR);
nep = NULL;
iounmap(d->base);
kfree(d);
}
#ifdef CONFIG_PM_SLEEP
static int neponset_resume(struct device *dev)
{
struct neponset_drvdata *d = dev_get_drvdata(dev);
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(d->gpio); i++) {
ret = gpio_reg_resume(d->gpio[i]);
if (ret)
break;
}
return ret;
}
static const struct dev_pm_ops neponset_pm_ops = {
.resume_noirq = neponset_resume,
.restore_noirq = neponset_resume,
};
#define PM_OPS &neponset_pm_ops
#else
#define PM_OPS NULL
#endif
static struct platform_driver neponset_device_driver = {
.probe = neponset_probe,
.remove_new = neponset_remove,
.driver = {
.name = "neponset",
.pm = PM_OPS,
},
};
static int __init neponset_init(void)
{
return platform_driver_register(&neponset_device_driver);
}
subsys_initcall(neponset_init);
| linux-master | arch/arm/mach-sa1100/neponset.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/arm/mach-sa1100/clock.c
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <mach/hardware.h>
#include <mach/generic.h>
static const char * const clk_tucr_parents[] = {
"clk32768", "clk3686400",
};
static DEFINE_SPINLOCK(tucr_lock);
static int clk_gpio27_enable(struct clk_hw *hw)
{
unsigned long flags;
/*
* First, set up the 3.6864MHz clock on GPIO 27 for the SA-1111:
* (SA-1110 Developer's Manual, section 9.1.2.1)
*/
local_irq_save(flags);
GAFR |= GPIO_32_768kHz;
GPDR |= GPIO_32_768kHz;
local_irq_restore(flags);
return 0;
}
static void clk_gpio27_disable(struct clk_hw *hw)
{
unsigned long flags;
local_irq_save(flags);
GPDR &= ~GPIO_32_768kHz;
GAFR &= ~GPIO_32_768kHz;
local_irq_restore(flags);
}
static const struct clk_ops clk_gpio27_ops = {
.enable = clk_gpio27_enable,
.disable = clk_gpio27_disable,
};
static const char * const clk_gpio27_parents[] = {
"tucr-mux",
};
static const struct clk_init_data clk_gpio27_init_data __initconst = {
.name = "gpio27",
.ops = &clk_gpio27_ops,
.parent_names = clk_gpio27_parents,
.num_parents = ARRAY_SIZE(clk_gpio27_parents),
};
/*
* Derived from the table 8-1 in the SA1110 manual, the MPLL appears to
* multiply its input rate by 4 x (4 + PPCR). This calculation gives
* the exact rate. The figures given in the table are the rates rounded
* to 100kHz. Stick with sa11x0_getspeed() for the time being.
*/
static unsigned long clk_mpll_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
return sa11x0_getspeed(0) * 1000;
}
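/*
 * Illustrative sketch (not part of the driver): the exact rate implied
 * by the formula above for a 3.6864MHz input. PPCR == 0 gives
 * 3686400 * 4 * 4 = 58982400Hz, i.e. the 59.0MHz entry of table 8-1.
 */
static inline unsigned long clk_mpll_exact_rate_example(unsigned int ppcr)
{
	return 3686400UL * 4 * (4 + (ppcr & 0xf));
}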
static const struct clk_ops clk_mpll_ops = {
.recalc_rate = clk_mpll_recalc_rate,
};
static const char * const clk_mpll_parents[] = {
"clk3686400",
};
static const struct clk_init_data clk_mpll_init_data __initconst = {
.name = "mpll",
.ops = &clk_mpll_ops,
.parent_names = clk_mpll_parents,
.num_parents = ARRAY_SIZE(clk_mpll_parents),
.flags = CLK_GET_RATE_NOCACHE | CLK_IS_CRITICAL,
};
int __init sa11xx_clk_init(void)
{
struct clk_hw *hw;
int ret;
hw = clk_hw_register_fixed_rate(NULL, "clk32768", NULL, 0, 32768);
if (IS_ERR(hw))
return PTR_ERR(hw);
clk_hw_register_clkdev(hw, NULL, "sa1100-rtc");
hw = clk_hw_register_fixed_rate(NULL, "clk3686400", NULL, 0, 3686400);
if (IS_ERR(hw))
return PTR_ERR(hw);
clk_hw_register_clkdev(hw, "OSTIMER0", NULL);
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return -ENOMEM;
hw->init = &clk_mpll_init_data;
ret = clk_hw_register(NULL, hw);
if (ret) {
kfree(hw);
return ret;
}
clk_hw_register_clkdev(hw, NULL, "sa11x0-fb");
clk_hw_register_clkdev(hw, NULL, "sa11x0-pcmcia");
clk_hw_register_clkdev(hw, NULL, "sa11x0-pcmcia.0");
clk_hw_register_clkdev(hw, NULL, "sa11x0-pcmcia.1");
clk_hw_register_clkdev(hw, NULL, "1800");
hw = clk_hw_register_mux(NULL, "tucr-mux", clk_tucr_parents,
ARRAY_SIZE(clk_tucr_parents), 0,
(void __iomem *)&TUCR, FShft(TUCR_TSEL),
FAlnMsk(TUCR_TSEL), 0, &tucr_lock);
clk_set_rate(hw->clk, 3686400);
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return -ENOMEM;
hw->init = &clk_gpio27_init_data;
ret = clk_hw_register(NULL, hw);
if (ret) {
kfree(hw);
return ret;
}
clk_hw_register_clkdev(hw, NULL, "sa1111.0");
return 0;
}
| linux-master | arch/arm/mach-sa1100/clock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-sa1100/jornada720_ssp.c
*
* Copyright (C) 2006/2007 Kristoffer Ericson <[email protected]>
* Copyright (C) 2006 Filip Zyzniewski <[email protected]>
*
* SSP driver for the HP Jornada 710/720/728
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/jornada720.h>
#include <asm/hardware/ssp.h>
static DEFINE_SPINLOCK(jornada_ssp_lock);
static unsigned long jornada_ssp_flags;
/**
* jornada_ssp_reverse - reverses input byte
* @byte: input byte to reverse
*
 * We need to reverse all data we receive from the MCU due to its physical location.
* returns : 01110111 -> 11101110
*/
inline u8 jornada_ssp_reverse(u8 byte)
{
return
((0x80 & byte) >> 7) |
((0x40 & byte) >> 5) |
((0x20 & byte) >> 3) |
((0x10 & byte) >> 1) |
((0x08 & byte) << 1) |
((0x04 & byte) << 3) |
((0x02 & byte) << 5) |
((0x01 & byte) << 7);
};
EXPORT_SYMBOL(jornada_ssp_reverse);
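/*
 * Sanity sketch (illustrative only): reversing a byte twice must return
 * the original value, e.g. 0x77 -> 0xee -> 0x77.
 */
static inline bool jornada_ssp_reverse_roundtrip(u8 byte)
{
	return jornada_ssp_reverse(jornada_ssp_reverse(byte)) == byte;
}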
/**
* jornada_ssp_byte - waits for ready ssp bus and sends byte
* @byte: input byte to transmit
*
 * Waits for the FIFO buffer to clear and then transmits; if it doesn't, we
 * time out after <timeout> rounds. Needs the MCU running before it's called.
*
* returns : %mcu output on success
* : %-ETIMEDOUT on timeout
*/
int jornada_ssp_byte(u8 byte)
{
int timeout = 400000;
u16 ret;
while ((GPLR & GPIO_GPIO10)) {
if (!--timeout) {
printk(KERN_WARNING "SSP: timeout while waiting for transmit\n");
return -ETIMEDOUT;
}
cpu_relax();
}
ret = jornada_ssp_reverse(byte) << 8;
ssp_write_word(ret);
ssp_read_word(&ret);
return jornada_ssp_reverse(ret);
};
EXPORT_SYMBOL(jornada_ssp_byte);
/**
 * jornada_ssp_inout - decide if input is a command or a data-exchange byte
* @byte: input byte to send (may be %TXDUMMY)
*
* returns : (jornada_ssp_byte(byte)) on success
* : %-ETIMEDOUT on timeout failure
*/
int jornada_ssp_inout(u8 byte)
{
int ret, i;
/* true means command byte */
if (byte != TXDUMMY) {
ret = jornada_ssp_byte(byte);
/* Proper return to commands is TxDummy */
if (ret != TXDUMMY) {
			for (i = 0; i < 256; i++) /* flush the bus */
if (jornada_ssp_byte(TXDUMMY) == -1)
break;
return -ETIMEDOUT;
}
} else /* Exchange TxDummy for data */
ret = jornada_ssp_byte(TXDUMMY);
return ret;
};
EXPORT_SYMBOL(jornada_ssp_inout);
/**
* jornada_ssp_start - enable mcu
*
*/
void jornada_ssp_start(void)
{
spin_lock_irqsave(&jornada_ssp_lock, jornada_ssp_flags);
GPCR = GPIO_GPIO25;
udelay(50);
return;
};
EXPORT_SYMBOL(jornada_ssp_start);
/**
* jornada_ssp_end - disable mcu and turn off lock
*
*/
void jornada_ssp_end(void)
{
GPSR = GPIO_GPIO25;
spin_unlock_irqrestore(&jornada_ssp_lock, jornada_ssp_flags);
return;
};
EXPORT_SYMBOL(jornada_ssp_end);
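/*
 * Client sketch (illustrative only): the usual command sequence, as
 * also used by jornada_ssp_probe() below. Take the lock, send the
 * command, then exchange TXDUMMY for the actual reply.
 */
static inline int jornada_ssp_query_example(u8 command)
{
	int ret;

	jornada_ssp_start();
	ret = jornada_ssp_inout(command);
	if (ret == TXDUMMY)
		ret = jornada_ssp_inout(TXDUMMY);
	jornada_ssp_end();
	return ret;
}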
static int jornada_ssp_probe(struct platform_device *dev)
{
int ret;
GPSR = GPIO_GPIO25;
ret = ssp_init();
	/* worked fine, let's not bother with anything else */
if (!ret) {
printk(KERN_INFO "SSP: device initialized with irq\n");
return ret;
}
printk(KERN_WARNING "SSP: initialization failed, trying non-irq solution \n");
/* init of Serial 4 port */
Ser4MCCR0 = 0;
Ser4SSCR0 = 0x0387;
Ser4SSCR1 = 0x18;
/* clear out any left over data */
ssp_flush();
/* enable MCU */
jornada_ssp_start();
/* see if return value makes sense */
ret = jornada_ssp_inout(GETBRIGHTNESS);
/* seems like it worked, just feed it with TxDummy to get rid of data */
if (ret == TXDUMMY)
jornada_ssp_inout(TXDUMMY);
jornada_ssp_end();
	/* failed, let's just kill everything */
if (ret == -ETIMEDOUT) {
printk(KERN_WARNING "SSP: attempts failed, bailing\n");
ssp_exit();
return -ENODEV;
}
/* all fine */
printk(KERN_INFO "SSP: device initialized\n");
return 0;
};
static void jornada_ssp_remove(struct platform_device *dev)
{
	/* Note that this doesn't actually remove the driver, since there's nothing
	 * to remove. It just makes sure everything is turned off. */
GPSR = GPIO_GPIO25;
ssp_exit();
};
struct platform_driver jornadassp_driver = {
.probe = jornada_ssp_probe,
.remove_new = jornada_ssp_remove,
.driver = {
.name = "jornada_ssp",
},
};
static int __init jornada_ssp_init(void)
{
return platform_driver_register(&jornadassp_driver);
}
module_init(jornada_ssp_init);
| linux-master | arch/arm/mach-sa1100/jornada720_ssp.c |
/*
* SA1100 Power Management Routines
*
* Copyright (c) 2001 Cliff Brake <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*
* History:
*
* 2001-02-06: Cliff Brake Initial code
*
* 2001-02-25: Sukjae Cho <[email protected]> &
* Chester Kuo <[email protected]>
* Save more value for the resume function! Support
* Bitsy/Assabet/Freebird board
*
* 2001-08-29: Nicolas Pitre <[email protected]>
* Cleaned up, pushed platform dependent stuff
* in the platform specific files.
*
* 2002-05-27: Nicolas Pitre Killed sleep.h and the kmalloced save array.
* Storage is local on the stack now.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <mach/hardware.h>
#include <asm/page.h>
#include <asm/suspend.h>
#include <asm/mach/time.h>
#include "generic.h"
extern int sa1100_finish_suspend(unsigned long);
#define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
/*
* List of global SA11x0 peripheral registers to preserve.
* More ones like CP and general purpose register values are preserved
* on the stack and then the stack pointer is stored last in sleep.S.
*/
enum { SLEEP_SAVE_GPDR, SLEEP_SAVE_GAFR,
SLEEP_SAVE_PPDR, SLEEP_SAVE_PPSR, SLEEP_SAVE_PPAR, SLEEP_SAVE_PSDR,
SLEEP_SAVE_Ser1SDCR0,
SLEEP_SAVE_COUNT
};
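/*
 * Expansion sketch (illustrative only): SAVE() and RESTORE() pair each
 * register with its slot in the enum above, e.g. for GPDR:
 */
static inline void sleep_save_expansion_example(unsigned long *sleep_save)
{
	sleep_save[SLEEP_SAVE_GPDR] = GPDR;	/* what SAVE(GPDR) expands to */
	GPDR = sleep_save[SLEEP_SAVE_GPDR];	/* what RESTORE(GPDR) expands to */
}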
static int sa11x0_pm_enter(suspend_state_t state)
{
unsigned long gpio, sleep_save[SLEEP_SAVE_COUNT];
gpio = GPLR;
/* save vital registers */
SAVE(GPDR);
SAVE(GAFR);
SAVE(PPDR);
SAVE(PPSR);
SAVE(PPAR);
SAVE(PSDR);
SAVE(Ser1SDCR0);
/* Clear previous reset status */
RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR;
/* set resume return address */
PSPR = __pa_symbol(cpu_resume);
/* go zzz */
cpu_suspend(0, sa1100_finish_suspend);
/*
* Ensure not to come back here if it wasn't intended
*/
RCSR = RCSR_SMR;
PSPR = 0;
/*
* Ensure interrupt sources are disabled; we will re-init
* the interrupt subsystem via the device manager.
*/
ICLR = 0;
ICCR = 1;
ICMR = 0;
/* restore registers */
RESTORE(GPDR);
RESTORE(GAFR);
RESTORE(PPDR);
RESTORE(PPSR);
RESTORE(PPAR);
RESTORE(PSDR);
RESTORE(Ser1SDCR0);
GPSR = gpio;
GPCR = ~gpio;
/*
* Clear the peripheral sleep-hold bit.
*/
PSSR = PSSR_PH;
return 0;
}
static const struct platform_suspend_ops sa11x0_pm_ops = {
.enter = sa11x0_pm_enter,
.valid = suspend_valid_only_mem,
};
int __init sa11x0_pm_init(void)
{
suspend_set_ops(&sa11x0_pm_ops);
return 0;
}
| linux-master | arch/arm/mach-sa1100/pm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-sa1100/generic.c
*
* Author: Nicolas Pitre
*
* Code common to all SA11x0 machines.
*/
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/irqchip/irq-sa11x0.h>
#include <video/sa1100fb.h>
#include <soc/sa1100/pwer.h>
#include <asm/div64.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <asm/irq.h>
#include <asm/system_misc.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/reset.h>
#include "generic.h"
#include <clocksource/pxa.h>
#define NR_FREQS 16
/*
* This table is setup for a 3.6864MHz Crystal.
*/
struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = {
{ .frequency = 59000, /* 59.0 MHz */},
{ .frequency = 73700, /* 73.7 MHz */},
{ .frequency = 88500, /* 88.5 MHz */},
{ .frequency = 103200, /* 103.2 MHz */},
{ .frequency = 118000, /* 118.0 MHz */},
{ .frequency = 132700, /* 132.7 MHz */},
{ .frequency = 147500, /* 147.5 MHz */},
{ .frequency = 162200, /* 162.2 MHz */},
{ .frequency = 176900, /* 176.9 MHz */},
{ .frequency = 191700, /* 191.7 MHz */},
{ .frequency = 206400, /* 206.4 MHz */},
{ .frequency = 221200, /* 221.2 MHz */},
{ .frequency = 235900, /* 235.9 MHz */},
{ .frequency = 250700, /* 250.7 MHz */},
{ .frequency = 265400, /* 265.4 MHz */},
{ .frequency = 280200, /* 280.2 MHz */},
{ .frequency = CPUFREQ_TABLE_END, },
};
unsigned int sa11x0_getspeed(unsigned int cpu)
{
if (cpu)
return 0;
return sa11x0_freq_table[PPCR & 0xf].frequency;
}
/*
* Default power-off for SA1100
*/
static void sa1100_power_off(void)
{
mdelay(100);
local_irq_disable();
/* disable internal oscillator, float CS lines */
PCFR = (PCFR_OPDE | PCFR_FP | PCFR_FS);
/* enable wake-up on GPIO0 (Assabet...) */
PWER = GFER = GRER = 1;
/*
* set scratchpad to zero, just in case it is used as a
* restart address by the bootloader.
*/
PSPR = 0;
/* enter sleep mode */
PMCR = PMCR_SF;
}
void sa11x0_restart(enum reboot_mode mode, const char *cmd)
{
clear_reset_status(RESET_STATUS_ALL);
if (mode == REBOOT_SOFT) {
/* Jump into ROM at address 0 */
soft_restart(0);
} else {
/* Use on-chip reset capability */
RSRR = RSRR_SWR;
}
}
static void sa11x0_register_device(struct platform_device *dev, void *data)
{
int err;
dev->dev.platform_data = data;
err = platform_device_register(dev);
if (err)
printk(KERN_ERR "Unable to register device %s: %d\n",
dev->name, err);
}
static struct resource sa11x0udc_resources[] = {
[0] = DEFINE_RES_MEM(__PREG(Ser0UDCCR), SZ_64K),
[1] = DEFINE_RES_IRQ(IRQ_Ser0UDC),
};
static u64 sa11x0udc_dma_mask = 0xffffffffUL;
static struct platform_device sa11x0udc_device = {
.name = "sa11x0-udc",
.id = -1,
.dev = {
.dma_mask = &sa11x0udc_dma_mask,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(sa11x0udc_resources),
.resource = sa11x0udc_resources,
};
static struct resource sa11x0uart1_resources[] = {
[0] = DEFINE_RES_MEM(__PREG(Ser1UTCR0), SZ_64K),
[1] = DEFINE_RES_IRQ(IRQ_Ser1UART),
};
static struct platform_device sa11x0uart1_device = {
.name = "sa11x0-uart",
.id = 1,
.num_resources = ARRAY_SIZE(sa11x0uart1_resources),
.resource = sa11x0uart1_resources,
};
static struct resource sa11x0uart3_resources[] = {
[0] = DEFINE_RES_MEM(__PREG(Ser3UTCR0), SZ_64K),
[1] = DEFINE_RES_IRQ(IRQ_Ser3UART),
};
static struct platform_device sa11x0uart3_device = {
.name = "sa11x0-uart",
.id = 3,
.num_resources = ARRAY_SIZE(sa11x0uart3_resources),
.resource = sa11x0uart3_resources,
};
static struct resource sa11x0mcp_resources[] = {
[0] = DEFINE_RES_MEM(__PREG(Ser4MCCR0), SZ_64K),
[1] = DEFINE_RES_MEM(__PREG(Ser4MCCR1), 4),
[2] = DEFINE_RES_IRQ(IRQ_Ser4MCP),
};
static u64 sa11x0mcp_dma_mask = 0xffffffffUL;
static struct platform_device sa11x0mcp_device = {
.name = "sa11x0-mcp",
.id = -1,
.dev = {
.dma_mask = &sa11x0mcp_dma_mask,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(sa11x0mcp_resources),
.resource = sa11x0mcp_resources,
};
void __init sa11x0_ppc_configure_mcp(void)
{
/* Setup the PPC unit for the MCP */
PPDR &= ~PPC_RXD4;
PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
PSDR |= PPC_RXD4;
PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
}
void sa11x0_register_mcp(struct mcp_plat_data *data)
{
sa11x0_register_device(&sa11x0mcp_device, data);
}
static struct resource sa11x0ssp_resources[] = {
[0] = DEFINE_RES_MEM(0x80070000, SZ_64K),
[1] = DEFINE_RES_IRQ(IRQ_Ser4SSP),
};
static u64 sa11x0ssp_dma_mask = 0xffffffffUL;
static struct platform_device sa11x0ssp_device = {
.name = "sa11x0-ssp",
.id = -1,
.dev = {
.dma_mask = &sa11x0ssp_dma_mask,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(sa11x0ssp_resources),
.resource = sa11x0ssp_resources,
};
static struct resource sa11x0fb_resources[] = {
[0] = DEFINE_RES_MEM(0xb0100000, SZ_64K),
[1] = DEFINE_RES_IRQ(IRQ_LCD),
};
static struct platform_device sa11x0fb_device = {
.name = "sa11x0-fb",
.id = -1,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(sa11x0fb_resources),
.resource = sa11x0fb_resources,
};
void sa11x0_register_lcd(struct sa1100fb_mach_info *inf)
{
sa11x0_register_device(&sa11x0fb_device, inf);
}
void sa11x0_register_pcmcia(int socket, struct gpiod_lookup_table *table)
{
if (table)
gpiod_add_lookup_table(table);
platform_device_register_simple("sa11x0-pcmcia", socket, NULL, 0);
}
static struct platform_device sa11x0mtd_device = {
.name = "sa1100-mtd",
.id = -1,
};
void sa11x0_register_mtd(struct flash_platform_data *flash,
struct resource *res, int nr)
{
flash->name = "sa1100";
sa11x0mtd_device.resource = res;
sa11x0mtd_device.num_resources = nr;
sa11x0_register_device(&sa11x0mtd_device, flash);
}
static struct resource sa1100_rtc_resources[] = {
DEFINE_RES_MEM(0x90010000, 0x40),
DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"),
DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"),
};
static struct platform_device sa11x0rtc_device = {
.name = "sa1100-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(sa1100_rtc_resources),
.resource = sa1100_rtc_resources,
};
static struct resource sa11x0dma_resources[] = {
DEFINE_RES_MEM(DMA_PHYS, DMA_SIZE),
DEFINE_RES_IRQ(IRQ_DMA0),
DEFINE_RES_IRQ(IRQ_DMA1),
DEFINE_RES_IRQ(IRQ_DMA2),
DEFINE_RES_IRQ(IRQ_DMA3),
DEFINE_RES_IRQ(IRQ_DMA4),
DEFINE_RES_IRQ(IRQ_DMA5),
};
static u64 sa11x0dma_dma_mask = DMA_BIT_MASK(32);
static struct platform_device sa11x0dma_device = {
.name = "sa11x0-dma",
.id = -1,
.dev = {
.dma_mask = &sa11x0dma_dma_mask,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(sa11x0dma_resources),
.resource = sa11x0dma_resources,
};
static struct platform_device *sa11x0_devices[] __initdata = {
&sa11x0udc_device,
&sa11x0uart1_device,
&sa11x0uart3_device,
&sa11x0ssp_device,
&sa11x0rtc_device,
&sa11x0dma_device,
};
static int __init sa1100_init(void)
{
struct resource wdt_res = DEFINE_RES_MEM(0x90000000, 0x20);
pm_power_off = sa1100_power_off;
regulator_has_full_constraints();
platform_device_register_simple("sa1100_wdt", -1, &wdt_res, 1);
return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices));
}
arch_initcall(sa1100_init);
void __init sa11x0_init_late(void)
{
sa11x0_pm_init();
}
int __init sa11x0_register_fixed_regulator(int n,
struct fixed_voltage_config *cfg,
struct regulator_consumer_supply *supplies, unsigned num_supplies,
bool uses_gpio)
{
struct regulator_init_data *id;
cfg->init_data = id = kzalloc(sizeof(*cfg->init_data), GFP_KERNEL);
if (!cfg->init_data)
return -ENOMEM;
if (!uses_gpio)
id->constraints.always_on = 1;
id->constraints.name = cfg->supply_name;
id->constraints.min_uV = cfg->microvolts;
id->constraints.max_uV = cfg->microvolts;
id->constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
id->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
id->consumer_supplies = supplies;
id->num_consumer_supplies = num_supplies;
platform_device_register_resndata(NULL, "reg-fixed-voltage", n,
NULL, 0, cfg, sizeof(*cfg));
return 0;
}
/*
* Common I/O mapping:
*
 * Typically, static virtual address mappings are as follows:
*
* 0xf0000000-0xf3ffffff: miscellaneous stuff (CPLDs, etc.)
* 0xf4000000-0xf4ffffff: SA-1111
* 0xf5000000-0xf5ffffff: reserved (used by cache flushing area)
* 0xf6000000-0xfffeffff: reserved (internal SA1100 IO defined above)
* 0xffff0000-0xffff0fff: SA1100 exception vectors
* 0xffff2000-0xffff2fff: Minicache copy_user_page area
*
* Below 0xe8000000 is reserved for vm allocation.
*
 * The machine-specific code must provide the extra mapping beside the
* default mapping provided here.
*/
static struct map_desc standard_io_desc[] __initdata = {
{ /* PCM */
.virtual = 0xf8000000,
.pfn = __phys_to_pfn(0x80000000),
.length = 0x00100000,
.type = MT_DEVICE
}, { /* SCM */
.virtual = 0xfa000000,
.pfn = __phys_to_pfn(0x90000000),
.length = 0x00100000,
.type = MT_DEVICE
}, { /* MER */
.virtual = 0xfc000000,
.pfn = __phys_to_pfn(0xa0000000),
.length = 0x00100000,
.type = MT_DEVICE
}, { /* LCD + DMA */
.virtual = 0xfe000000,
.pfn = __phys_to_pfn(0xb0000000),
.length = 0x00200000,
.type = MT_DEVICE
},
};
void __init sa1100_map_io(void)
{
iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc));
}
void __init sa1100_timer_init(void)
{
pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x90000000));
}
static struct resource irq_resource =
DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs");
void __init sa1100_init_irq(void)
{
request_resource(&iomem_resource, &irq_resource);
sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start);
sa1100_init_gpio();
sa11xx_clk_init();
}
/*
* Disable the memory bus request/grant signals on the SA1110 to
* ensure that we don't receive spurious memory requests. We set
* the MBGNT signal false to ensure the SA1111 doesn't own the
* SDRAM bus.
*/
void sa1110_mb_disable(void)
{
unsigned long flags;
local_irq_save(flags);
PGSR &= ~GPIO_MBGNT;
GPCR = GPIO_MBGNT;
GPDR = (GPDR & ~GPIO_MBREQ) | GPIO_MBGNT;
GAFR &= ~(GPIO_MBGNT | GPIO_MBREQ);
local_irq_restore(flags);
}
/*
* If the system is going to use the SA-1111 DMA engines, set up
* the memory bus request/grant pins.
*/
void sa1110_mb_enable(void)
{
unsigned long flags;
local_irq_save(flags);
PGSR &= ~GPIO_MBGNT;
GPCR = GPIO_MBGNT;
GPDR = (GPDR & ~GPIO_MBREQ) | GPIO_MBGNT;
GAFR |= (GPIO_MBGNT | GPIO_MBREQ);
TUCR |= TUCR_MR;
local_irq_restore(flags);
}
int sa11x0_gpio_set_wake(unsigned int gpio, unsigned int on)
{
if (on)
PWER |= BIT(gpio);
else
PWER &= ~BIT(gpio);
return 0;
}
int sa11x0_sc_set_wake(unsigned int irq, unsigned int on)
{
if (BIT(irq) != IC_RTCAlrm)
return -EINVAL;
if (on)
PWER |= PWER_RTC;
else
PWER &= ~PWER_RTC;
return 0;
}
| linux-master | arch/arm/mach-sa1100/generic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-sa1100/assabet.c
*
* Author: Nicolas Pitre
*
* This file contains all Assabet-specific tweaks.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/gpio-reg.h>
#include <linux/gpio/machine.h>
#include <linux/gpio_keys.h>
#include <linux/ioport.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <video/sa1100fb.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <mach/assabet.h>
#include <linux/platform_data/mfd-mcp-sa11x0.h>
#include <mach/irqs.h>
#include "generic.h"
#define ASSABET_BCR_DB1110 \
(ASSABET_BCR_SPK_OFF | \
ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \
ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \
ASSABET_BCR_IRDA_MD0)
#define ASSABET_BCR_DB1111 \
(ASSABET_BCR_SPK_OFF | \
ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \
ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \
ASSABET_BCR_CF_BUS_OFF | ASSABET_BCR_STEREO_LB | \
ASSABET_BCR_IRDA_MD0 | ASSABET_BCR_CF_RST)
unsigned long SCR_value = ASSABET_SCR_INIT;
EXPORT_SYMBOL(SCR_value);
static struct gpio_chip *assabet_bcr_gc;
static const char *assabet_names[] = {
"cf_pwr", "cf_gfx_reset", "nsoft_reset", "irda_fsel",
"irda_md0", "irda_md1", "stereo_loopback", "ncf_bus_on",
"audio_pwr_on", "light_pwr_on", "lcd16data", "lcd_pwr_on",
"rs232_on", "nred_led", "ngreen_led", "vib_on",
"com_dtr", "com_rts", "radio_wake_mod", "i2c_enab",
"tvir_enab", "qmute", "radio_pwr_on", "spkr_off",
"rs232_valid", "com_dcd", "com_cts", "com_dsr",
"radio_cts", "radio_dsr", "radio_dcd", "radio_ri",
};
/* The old deprecated interface */
void ASSABET_BCR_frob(unsigned int mask, unsigned int val)
{
unsigned long m = mask, v = val;
assabet_bcr_gc->set_multiple(assabet_bcr_gc, &m, &v);
}
EXPORT_SYMBOL(ASSABET_BCR_frob);
static void __init assabet_init_gpio(void __iomem *reg, u32 def_val)
{
struct gpio_chip *gc;
writel_relaxed(def_val, reg);
gc = gpio_reg_init(NULL, reg, -1, 32, "assabet", 0xff000000, def_val,
assabet_names, NULL, NULL);
if (IS_ERR(gc))
return;
assabet_bcr_gc = gc;
}
/*
* The codec reset goes to three devices, so we need to release
* the rest when any one of these requests it. However, that
* causes the ADV7171 to consume around 100mA - more than half
* the LCD-blanked power.
*
* With the ADV7171, LCD and backlight enabled, we go over
* budget on the MAX846 Li-Ion charger, and if no Li-Ion battery
* is connected, the Assabet crashes.
*/
#define RST_UCB1X00 (1 << 0)
#define RST_UDA1341 (1 << 1)
#define RST_ADV7171 (1 << 2)
#define SDA GPIO_GPIO(15)
#define SCK GPIO_GPIO(18)
#define MOD GPIO_GPIO(17)
static void adv7171_start(void)
{
GPSR = SCK;
udelay(1);
GPSR = SDA;
udelay(2);
GPCR = SDA;
}
static void adv7171_stop(void)
{
GPSR = SCK;
udelay(2);
GPSR = SDA;
udelay(1);
}
static void adv7171_send(unsigned byte)
{
unsigned i;
for (i = 0; i < 8; i++, byte <<= 1) {
GPCR = SCK;
udelay(1);
if (byte & 0x80)
GPSR = SDA;
else
GPCR = SDA;
udelay(1);
GPSR = SCK;
udelay(1);
}
GPCR = SCK;
udelay(1);
GPSR = SDA;
udelay(1);
GPDR &= ~SDA;
GPSR = SCK;
udelay(1);
if (GPLR & SDA)
printk(KERN_WARNING "No ACK from ADV7171\n");
udelay(1);
GPCR = SCK | SDA;
udelay(1);
GPDR |= SDA;
udelay(1);
}
static void adv7171_write(unsigned reg, unsigned val)
{
unsigned gpdr = GPDR;
unsigned gplr = GPLR;
ASSABET_BCR_frob(ASSABET_BCR_AUDIO_ON, ASSABET_BCR_AUDIO_ON);
udelay(100);
GPCR = SDA | SCK | MOD; /* clear L3 mode to ensure UDA1341 doesn't respond */
GPDR = (GPDR | SCK | MOD) & ~SDA;
udelay(10);
if (!(GPLR & SDA))
printk(KERN_WARNING "Something dragging SDA down?\n");
GPDR |= SDA;
adv7171_start();
adv7171_send(0x54);
adv7171_send(reg);
adv7171_send(val);
adv7171_stop();
/* Restore GPIO state for L3 bus */
GPSR = gplr & (SDA | SCK | MOD);
GPCR = (~gplr) & (SDA | SCK | MOD);
GPDR = gpdr;
}
static void adv7171_sleep(void)
{
/* Put the ADV7171 into sleep mode */
adv7171_write(0x04, 0x40);
}
static unsigned codec_nreset;
static void assabet_codec_reset(unsigned mask, int set)
{
unsigned long flags;
bool old;
local_irq_save(flags);
old = !codec_nreset;
if (set)
codec_nreset &= ~mask;
else
codec_nreset |= mask;
if (old != !codec_nreset) {
if (codec_nreset) {
ASSABET_BCR_set(ASSABET_BCR_NCODEC_RST);
adv7171_sleep();
} else {
ASSABET_BCR_clear(ASSABET_BCR_NCODEC_RST);
}
}
local_irq_restore(flags);
}
static void assabet_ucb1x00_reset(enum ucb1x00_reset state)
{
int set = state == UCB_RST_REMOVE || state == UCB_RST_SUSPEND ||
state == UCB_RST_PROBE_FAIL;
assabet_codec_reset(RST_UCB1X00, set);
}
void assabet_uda1341_reset(int set)
{
assabet_codec_reset(RST_UDA1341, set);
}
EXPORT_SYMBOL(assabet_uda1341_reset);
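/*
 * Usage sketch (illustrative only): the reset line is shared between
 * three devices, so a client releases it while active and re-asserts it
 * when done; the refcounting above keeps the codec out of reset while
 * any user still needs it.
 */
static inline void assabet_uda1341_reset_example(void)
{
	assabet_uda1341_reset(0);	/* release reset: codec usable */
	assabet_uda1341_reset(1);	/* request reset when finished */
}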
/*
* Assabet flash support code.
*/
#ifdef ASSABET_REV_4
/*
* Phase 4 Assabet has two 28F160B3 flash parts in bank 0:
*/
static struct mtd_partition assabet_partitions[] = {
{
.name = "bootloader",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "bootloader params",
.size = 0x00020000,
.offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "jffs",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
#else
/*
* Phase 5 Assabet has two 28F128J3A flash parts in bank 0:
*/
static struct mtd_partition assabet_partitions[] = {
{
.name = "bootloader",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "bootloader params",
.size = 0x00040000,
.offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "jffs",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
#endif
static struct flash_platform_data assabet_flash_data = {
.map_name = "cfi_probe",
.parts = assabet_partitions,
.nr_parts = ARRAY_SIZE(assabet_partitions),
};
static struct resource assabet_flash_resources[] = {
DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M),
DEFINE_RES_MEM(SA1100_CS1_PHYS, SZ_32M),
};
static struct ucb1x00_plat_data assabet_ucb1x00_data = {
.reset = assabet_ucb1x00_reset,
.gpio_base = -1,
.can_wakeup = 1,
};
static struct mcp_plat_data assabet_mcp_data = {
.mccr0 = MCCR0_ADM,
.sclk_rate = 11981000,
.codec_pdata = &assabet_ucb1x00_data,
};
static void assabet_lcd_set_visual(u32 visual)
{
u_int is_true_color = visual == FB_VISUAL_TRUECOLOR;
if (machine_is_assabet()) {
#if 1 // phase 4 or newer Assabets
if (is_true_color)
ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB);
else
ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB);
#else
		// older Assabets
if (is_true_color)
ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB);
else
ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB);
#endif
}
}
#ifndef ASSABET_PAL_VIDEO
static void assabet_lcd_backlight_power(int on)
{
if (on)
ASSABET_BCR_set(ASSABET_BCR_LIGHT_ON);
else
ASSABET_BCR_clear(ASSABET_BCR_LIGHT_ON);
}
/*
* Turn on/off the backlight. When turning the backlight on, we wait
* 500us after turning it on so we don't cause the supplies to droop
* when we enable the LCD controller (and cause a hard reset.)
*/
static void assabet_lcd_power(int on)
{
if (on) {
ASSABET_BCR_set(ASSABET_BCR_LCD_ON);
udelay(500);
} else
ASSABET_BCR_clear(ASSABET_BCR_LCD_ON);
}
/*
 * The Assabet uses a Sharp LQ039Q2DS54 LCD module. It actually
 * takes an RGB666 signal, but we provide it with an RGB565 signal
* instead (def_rgb_16).
*/
static struct sa1100fb_mach_info lq039q2ds54_info = {
.pixclock = 171521, .bpp = 16,
.xres = 320, .yres = 240,
.hsync_len = 5, .vsync_len = 1,
.left_margin = 61, .upper_margin = 3,
.right_margin = 9, .lower_margin = 0,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
.lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
.backlight_power = assabet_lcd_backlight_power,
.lcd_power = assabet_lcd_power,
.set_visual = assabet_lcd_set_visual,
};
#else
static void assabet_pal_backlight_power(int on)
{
ASSABET_BCR_clear(ASSABET_BCR_LIGHT_ON);
}
static void assabet_pal_power(int on)
{
ASSABET_BCR_clear(ASSABET_BCR_LCD_ON);
}
static struct sa1100fb_mach_info pal_info = {
.pixclock = 67797, .bpp = 16,
.xres = 640, .yres = 512,
.hsync_len = 64, .vsync_len = 6,
.left_margin = 125, .upper_margin = 70,
.right_margin = 115, .lower_margin = 36,
.lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
.lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(512),
.backlight_power = assabet_pal_backlight_power,
.lcd_power = assabet_pal_power,
.set_visual = assabet_lcd_set_visual,
};
#endif
#ifdef CONFIG_ASSABET_NEPONSET
static struct resource neponset_resources[] = {
DEFINE_RES_MEM(0x10000000, 0x08000000),
DEFINE_RES_MEM(0x18000000, 0x04000000),
DEFINE_RES_MEM(0x40000000, SZ_8K),
DEFINE_RES_IRQ(IRQ_GPIO25),
};
#endif
static struct gpiod_lookup_table assabet_cf_gpio_table = {
.dev_id = "sa11x0-pcmcia.1",
.table = {
GPIO_LOOKUP("gpio", 21, "ready", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio", 22, "detect", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("gpio", 24, "bvd2", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio", 25, "bvd1", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("assabet", 1, "reset", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("assabet", 7, "bus-enable", GPIO_ACTIVE_LOW),
{ },
},
};
static struct regulator_consumer_supply assabet_cf_vcc_consumers[] = {
REGULATOR_SUPPLY("vcc", "sa11x0-pcmcia.1"),
};
static struct fixed_voltage_config assabet_cf_vcc_pdata __initdata = {
.supply_name = "cf-power",
.microvolts = 3300000,
};
static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = {
.dev_id = "reg-fixed-voltage.0",
.table = {
GPIO_LOOKUP("assabet", 0, NULL, GPIO_ACTIVE_HIGH),
{ },
},
};
static struct gpiod_lookup_table assabet_leds_gpio_table = {
.dev_id = "leds-gpio",
.table = {
GPIO_LOOKUP("assabet", 13, NULL, GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 14, NULL, GPIO_ACTIVE_LOW),
{ },
},
};
static struct gpio_led assabet_leds[] __initdata = {
{
.name = "assabet:red",
.default_trigger = "cpu0",
.default_state = LEDS_GPIO_DEFSTATE_KEEP,
}, {
.name = "assabet:green",
.default_trigger = "heartbeat",
.default_state = LEDS_GPIO_DEFSTATE_KEEP,
},
};
static const struct gpio_led_platform_data assabet_leds_pdata __initconst = {
.num_leds = ARRAY_SIZE(assabet_leds),
.leds = assabet_leds,
};
static struct gpio_keys_button assabet_keys_buttons[] = {
{
.gpio = 0,
.irq = IRQ_GPIO0,
.desc = "gpio0",
.wakeup = 1,
.can_disable = 1,
.debounce_interval = 5,
}, {
.gpio = 1,
.irq = IRQ_GPIO1,
.desc = "gpio1",
.wakeup = 1,
.can_disable = 1,
.debounce_interval = 5,
},
};
static const struct gpio_keys_platform_data assabet_keys_pdata = {
.buttons = assabet_keys_buttons,
.nbuttons = ARRAY_SIZE(assabet_keys_buttons),
.rep = 0,
};
static struct gpiod_lookup_table assabet_uart1_gpio_table = {
.dev_id = "sa11x0-uart.1",
.table = {
GPIO_LOOKUP("assabet", 16, "dtr", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 17, "rts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 25, "dcd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 26, "cts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 27, "dsr", GPIO_ACTIVE_LOW),
{ },
},
};
static struct gpiod_lookup_table assabet_uart3_gpio_table = {
.dev_id = "sa11x0-uart.3",
.table = {
GPIO_LOOKUP("assabet", 28, "cts", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 29, "dsr", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 30, "dcd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("assabet", 31, "rng", GPIO_ACTIVE_LOW),
{ },
},
};
static void __init assabet_init(void)
{
/*
* Ensure that the power supply is in "high power" mode.
*/
GPSR = GPIO_GPIO16;
GPDR |= GPIO_GPIO16;
/*
* Ensure that these pins are set as outputs and are driving
* logic 0. This ensures that we won't inadvertently toggle
	 * the WS latch in the CPLD, and that the pins don't float, causing
* excessive power drain. --rmk
*/
GPCR = GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM;
GPDR |= GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM;
/*
* Also set GPIO27 as an output; this is used to clock UART3
	 * via the FPGA, and otherwise has no pullups or pulldowns,
* so stop it floating.
*/
GPCR = GPIO_GPIO27;
GPDR |= GPIO_GPIO27;
/*
* Set up registers for sleep mode.
*/
PWER = PWER_GPIO0;
PGSR = 0;
PCFR = 0;
PSDR = 0;
PPDR |= PPC_TXD3 | PPC_TXD1;
PPSR |= PPC_TXD3 | PPC_TXD1;
sa11x0_ppc_configure_mcp();
if (machine_has_neponset()) {
#ifndef CONFIG_ASSABET_NEPONSET
printk( "Warning: Neponset detected but full support "
"hasn't been configured in the kernel\n" );
#else
platform_device_register_simple("neponset", 0,
neponset_resources, ARRAY_SIZE(neponset_resources));
#endif
} else {
gpiod_add_lookup_table(&assabet_uart1_gpio_table);
gpiod_add_lookup_table(&assabet_uart3_gpio_table);
gpiod_add_lookup_table(&assabet_cf_vcc_gpio_table);
sa11x0_register_fixed_regulator(0, &assabet_cf_vcc_pdata,
assabet_cf_vcc_consumers,
ARRAY_SIZE(assabet_cf_vcc_consumers),
true);
}
platform_device_register_resndata(NULL, "gpio-keys", 0,
NULL, 0,
&assabet_keys_pdata,
sizeof(assabet_keys_pdata));
gpiod_add_lookup_table(&assabet_leds_gpio_table);
gpio_led_register_device(-1, &assabet_leds_pdata);
#ifndef ASSABET_PAL_VIDEO
sa11x0_register_lcd(&lq039q2ds54_info);
#else
sa11x0_register_lcd(&pal_video);
#endif
sa11x0_register_mtd(&assabet_flash_data, assabet_flash_resources,
ARRAY_SIZE(assabet_flash_resources));
sa11x0_register_mcp(&assabet_mcp_data);
if (!machine_has_neponset())
sa11x0_register_pcmcia(1, &assabet_cf_gpio_table);
}
/*
* On Assabet, we must probe for the Neponset board _before_
* paging_init() has occurred to actually determine the amount
* of RAM available. To do so, we map the appropriate IO section
* in the page table here in order to access GPIO registers.
*/
static void __init map_sa1100_gpio_regs(void)
{
unsigned long phys = __PREG(GPLR) & PMD_MASK;
unsigned long virt = (unsigned long)io_p2v(phys);
int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
pmd_t *pmd;
pmd = pmd_off_k(virt);
*pmd = __pmd(phys | prot);
flush_pmd_entry(pmd);
}
/*
* Read System Configuration "Register"
* (taken from "Intel StrongARM SA-1110 Microprocessor Development Board
* User's Guide", section 4.4.1)
*
* This same scan is performed in arch/arm/boot/compressed/head-sa1100.S
* to set up the serial port for decompression status messages. We
* repeat it here because the kernel may not be loaded as a zImage, and
* also because it's a hassle to communicate the SCR value to the kernel
* from the decompressor.
*
* Note that IRQs are guaranteed to be disabled.
*/
static void __init get_assabet_scr(void)
{
unsigned long scr, i;
GPDR |= 0x3fc; /* Configure GPIO 9:2 as outputs */
GPSR = 0x3fc; /* Write 0xFF to GPIO 9:2 */
GPDR &= ~(0x3fc); /* Configure GPIO 9:2 as inputs */
	for (i = 100; i--; )	/* Read GPIO 9:2 */
scr = GPLR;
GPDR |= 0x3fc; /* restore correct pin direction */
scr &= 0x3fc; /* save as system configuration byte. */
SCR_value = scr;
}
static void __init
fixup_assabet(struct tag *tags, char **cmdline)
{
/* This must be done before any call to machine_has_neponset() */
map_sa1100_gpio_regs();
get_assabet_scr();
if (machine_has_neponset())
printk("Neponset expansion board detected\n");
}
static void assabet_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
{
if (port->mapbase == _Ser1UTCR0) {
if (state)
ASSABET_BCR_clear(ASSABET_BCR_RS232EN);
else
ASSABET_BCR_set(ASSABET_BCR_RS232EN);
}
}
static struct sa1100_port_fns assabet_port_fns __initdata = {
.pm = assabet_uart_pm,
};
static struct map_desc assabet_io_desc[] __initdata = {
{ /* Board Control Register */
.virtual = 0xf1000000,
.pfn = __phys_to_pfn(0x12000000),
.length = 0x00100000,
.type = MT_DEVICE
}, { /* MQ200 */
.virtual = 0xf2800000,
.pfn = __phys_to_pfn(0x4b800000),
.length = 0x00800000,
.type = MT_DEVICE
}
};
static void __init assabet_map_io(void)
{
sa1100_map_io();
iotable_init(assabet_io_desc, ARRAY_SIZE(assabet_io_desc));
/*
* Set SUS bit in SDCR0 so serial port 1 functions.
	 * It's called GPCLKR0 in my SA1110 manual.
*/
Ser1SDCR0 |= SDCR0_SUS;
MSC1 = (MSC1 & ~0xffff) |
MSC_NonBrst | MSC_32BitStMem |
MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
if (!machine_has_neponset())
sa1100_register_uart_fns(&assabet_port_fns);
/*
* When Neponset is attached, the first UART should be
* UART3. That's what Angel is doing and many documents
* are stating this.
*
* We do the Neponset mapping even if Neponset support
* isn't compiled in so the user will still get something on
* the expected physical serial port.
*
* We no longer do this; not all boot loaders support it,
* and UART3 appears to be somewhat unreliable with blob.
*/
sa1100_register_uart(0, 1);
sa1100_register_uart(2, 3);
}
static void __init assabet_init_irq(void)
{
u32 def_val;
sa1100_init_irq();
if (machine_has_neponset())
def_val = ASSABET_BCR_DB1111;
else
def_val = ASSABET_BCR_DB1110;
/*
* Angel sets this, but other bootloaders may not.
*
* This must precede any driver calls to BCR_set() or BCR_clear().
*/
assabet_init_gpio((void *)&ASSABET_BCR, def_val);
}
MACHINE_START(ASSABET, "Intel-Assabet")
.atag_offset = 0x100,
.fixup = fixup_assabet,
.map_io = assabet_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = assabet_init_irq,
.init_time = sa1100_timer_init,
.init_machine = assabet_init,
.init_late = sa11x0_init_late,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
.restart = sa11x0_restart,
MACHINE_END
| linux-master | arch/arm/mach-sa1100/assabet.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/lib/xor-neon.c
*
* Copyright (C) 2013 Linaro Ltd <[email protected]>
*/
#include <linux/raid/xor.h>
#include <linux/module.h>
MODULE_LICENSE("GPL");
#ifndef __ARM_NEON__
#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
#endif
/*
* Pull in the reference implementations while instructing GCC (through
* -ftree-vectorize) to attempt to exploit implicit parallelism and emit
* NEON instructions. Clang does this by default at O2 so no pragma is
* needed.
*/
#ifdef CONFIG_CC_IS_GCC
#pragma GCC optimize "tree-vectorize"
#endif
#pragma GCC diagnostic ignored "-Wunused-variable"
#include <asm-generic/xor.h>
struct xor_block_template const xor_block_neon_inner = {
.name = "__inner_neon__",
.do_2 = xor_8regs_2,
.do_3 = xor_8regs_3,
.do_4 = xor_8regs_4,
.do_5 = xor_8regs_5,
};
EXPORT_SYMBOL(xor_block_neon_inner);
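/*
 * Usage sketch (illustrative only): callers normally reach this via the
 * xor_blocks() API; the do_2 method XORs p2 into p1, with bytes a
 * multiple of eight machine words.
 */
static inline void xor_neon_do_2_example(unsigned long bytes,
					 unsigned long *p1, unsigned long *p2)
{
	xor_block_neon_inner.do_2(bytes, p1, p2);
}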
| linux-master | arch/arm/lib/xor-neon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Delay loops based on the OpenRISC implementation.
*
* Copyright (C) 2012 ARM Limited
*
* Author: Will Deacon <[email protected]>
*/
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timex.h>
/*
* Default to the loop-based delay implementation.
*/
struct arm_delay_ops arm_delay_ops __ro_after_init = {
.delay = __loop_delay,
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
};
static const struct delay_timer *delay_timer;
static bool delay_calibrated;
static u64 delay_res;
int read_current_timer(unsigned long *timer_val)
{
if (!delay_timer)
return -ENXIO;
*timer_val = delay_timer->read_current_timer();
return 0;
}
EXPORT_SYMBOL_GPL(read_current_timer);
static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
return (cyc * mult) >> shift;
}
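/*
 * Worked example (illustrative only, hypothetical mult/shift values):
 * for a 1MHz timer a pair of (1000 << 10, 10) converts one cycle to
 * (1 * 1024000) >> 10 = 1000ns.
 */
static inline u64 cyc_to_ns_example(void)
{
	return cyc_to_ns(1ULL, 1000 << 10, 10);	/* 1000ns per cycle */
}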
static void __timer_delay(unsigned long cycles)
{
cycles_t start = get_cycles();
while ((get_cycles() - start) < cycles)
cpu_relax();
}
static void __timer_const_udelay(unsigned long xloops)
{
unsigned long long loops = xloops;
loops *= arm_delay_ops.ticks_per_jiffy;
__timer_delay(loops >> UDELAY_SHIFT);
}
static void __timer_udelay(unsigned long usecs)
{
__timer_const_udelay(usecs * UDELAY_MULT);
}
void __init register_current_timer_delay(const struct delay_timer *timer)
{
u32 new_mult, new_shift;
u64 res;
clocks_calc_mult_shift(&new_mult, &new_shift, timer->freq,
NSEC_PER_SEC, 3600);
res = cyc_to_ns(1ULL, new_mult, new_shift);
if (res > 1000) {
pr_err("Ignoring delay timer %ps, which has insufficient resolution of %lluns\n",
timer, res);
return;
}
if (!delay_calibrated && (!delay_res || (res < delay_res))) {
pr_info("Switching to timer-based delay loop, resolution %lluns\n", res);
delay_timer = timer;
lpj_fine = timer->freq / HZ;
delay_res = res;
/* cpufreq may scale loops_per_jiffy, so keep a private copy */
arm_delay_ops.ticks_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
} else {
pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
}
}
unsigned long calibrate_delay_is_known(void)
{
delay_calibrated = true;
return lpj_fine;
}
void calibration_delay_done(void)
{
delay_calibrated = true;
}
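/*
 * Registration sketch (illustrative only, hypothetical names): a
 * platform with a free-running counter registers it roughly like this,
 * typically from its clocksource init code.
 */
static unsigned long example_read_current_timer(void)
{
	return 0;	/* read the free-running hardware counter here */
}

static struct delay_timer example_delay_timer = {
	.read_current_timer = example_read_current_timer,
	.freq = 1000000,	/* hypothetical 1MHz counter */
};

static void __init example_register_delay(void)
{
	register_current_timer_delay(&example_delay_timer);
}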
| linux-master | arch/arm/lib/delay.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/lib/uaccess_with_memcpy.c
*
* Written by: Lennert Buytenhek and Nicolas Pitre
* Copyright (C) 2009 Marvell Semiconductor
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
unsigned long addr = (unsigned long)_addr;
pgd_t *pgd;
p4d_t *p4d;
pmd_t *pmd;
pte_t *pte;
pud_t *pud;
spinlock_t *ptl;
pgd = pgd_offset(current->mm, addr);
if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
return 0;
p4d = p4d_offset(pgd, addr);
if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
return 0;
pud = pud_offset(p4d, addr);
if (unlikely(pud_none(*pud) || pud_bad(*pud)))
return 0;
pmd = pmd_offset(pud, addr);
if (unlikely(pmd_none(*pmd)))
return 0;
/*
* A pmd can be bad if it refers to a HugeTLB or THP page.
*
* Both THP and HugeTLB pages have the same pmd layout
* and should not be manipulated by the pte functions.
*
* Lock the page table for the destination and check
* to see that it's still huge and whether or not we will
* need to fault on write.
*/
if (unlikely(pmd_thp_or_huge(*pmd))) {
ptl = ¤t->mm->page_table_lock;
spin_lock(ptl);
if (unlikely(!pmd_thp_or_huge(*pmd)
|| pmd_hugewillfault(*pmd))) {
spin_unlock(ptl);
return 0;
}
*ptep = NULL;
*ptlp = ptl;
return 1;
}
if (unlikely(pmd_bad(*pmd)))
return 0;
pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
if (unlikely(!pte))
return 0;
if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
!pte_write(*pte) || !pte_dirty(*pte))) {
pte_unmap_unlock(pte, ptl);
return 0;
}
*ptep = pte;
*ptlp = ptl;
return 1;
}
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
unsigned long ua_flags;
int atomic;
/* the mmap semaphore is taken only if not in an atomic context */
atomic = faulthandler_disabled();
if (!atomic)
mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(to, &pte, &ptl)) {
if (!atomic)
mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)to))
goto out;
if (!atomic)
mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
if (tocopy > n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
__memcpy((void *)to, from, tocopy);
uaccess_restore(ua_flags);
to += tocopy;
from += tocopy;
n -= tocopy;
if (pte)
pte_unmap_unlock(pte, ptl);
else
spin_unlock(ptl);
}
if (!atomic)
mmap_read_unlock(current->mm);
out:
return n;
}
unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
/*
* This test is stubbed out of the main function above to keep
* the overhead for small copies low by avoiding a large
* register dump on the stack just to reload them right away.
* With frame pointer disabled, tail call optimization kicks in
* as well making this test almost invisible.
*/
if (n < 64) {
unsigned long ua_flags = uaccess_save_and_enable();
n = __copy_to_user_std(to, from, n);
uaccess_restore(ua_flags);
} else {
n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
from, n);
}
return n;
}
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
unsigned long ua_flags;
mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(addr, &pte, &ptl)) {
mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)addr))
goto out;
mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
if (tocopy > n)
tocopy = n;
ua_flags = uaccess_save_and_enable();
__memset((void *)addr, 0, tocopy);
uaccess_restore(ua_flags);
addr += tocopy;
n -= tocopy;
if (pte)
pte_unmap_unlock(pte, ptl);
else
spin_unlock(ptl);
}
mmap_read_unlock(current->mm);
out:
return n;
}
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in arm_copy_to_user() above. */
if (n < 64) {
unsigned long ua_flags = uaccess_save_and_enable();
n = __clear_user_std(addr, n);
uaccess_restore(ua_flags);
} else {
n = __clear_user_memset(addr, n);
}
return n;
}
#if 0
/*
* This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small, but still
 * measurable) would be implied by a runtime-determined variable threshold, and
* so far the measurement on concerned targets didn't show a worthwhile
* variation.
*
* Note that a fairly precise sched_clock() implementation is needed
* for results to make some sense.
*/
#include <linux/vmalloc.h>
static int __init test_size_threshold(void)
{
struct page *src_page, *dst_page;
void *user_ptr, *kernel_ptr;
unsigned long long t0, t1, t2;
int size, ret;
ret = -ENOMEM;
src_page = alloc_page(GFP_KERNEL);
if (!src_page)
goto no_src;
dst_page = alloc_page(GFP_KERNEL);
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
if (!user_ptr)
goto no_vmap;
/* warm up the src page dcache */
ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
for (size = PAGE_SIZE; size >= 4; size /= 2) {
t0 = sched_clock();
ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
t1 = sched_clock();
ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
t2 = sched_clock();
printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
}
for (size = PAGE_SIZE; size >= 4; size /= 2) {
t0 = sched_clock();
ret |= __clear_user_memset(user_ptr, size);
t1 = sched_clock();
ret |= __clear_user_std(user_ptr, size);
t2 = sched_clock();
printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
}
if (ret)
ret = -EFAULT;
vunmap(user_ptr);
no_vmap:
put_page(dst_page);
no_dst:
put_page(src_page);
no_src:
return ret;
}
subsys_initcall(test_size_threshold);
#endif
| linux-master | arch/arm/lib/uaccess_with_memcpy.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/error-injection.h>
#include <linux/kprobes.h>
void override_function_with_return(struct pt_regs *regs)
{
instruction_pointer_set(regs, regs->ARM_lr);
}
NOKPROBE_SYMBOL(override_function_with_return);
| linux-master | arch/arm/lib/error-inject.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/pgd.c
*
* Copyright (C) 1998-2005 Russell King
*/
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "mm.h"
#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc() kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd) kfree(pgd)
#else
#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)
#endif
/*
* need to get a 16k page for level 1
*/
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
p4d_t *new_p4d, *init_p4d;
pud_t *new_pud, *init_pud;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
new_pgd = __pgd_alloc();
if (!new_pgd)
goto no_pgd;
memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/*
* Copy over the kernel and IO PGD entries
*/
init_pgd = pgd_offset_k(0);
memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
#ifdef CONFIG_ARM_LPAE
/*
* Allocate PMD table for modules and pkmap mappings.
*/
new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
MODULES_VADDR);
if (!new_p4d)
goto no_p4d;
new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
if (!new_pud)
goto no_pud;
new_pmd = pmd_alloc(mm, new_pud, 0);
if (!new_pmd)
goto no_pmd;
#ifdef CONFIG_KASAN
/*
* Copy PMD table for KASAN shadow mappings.
*/
init_pgd = pgd_offset_k(TASK_SIZE);
init_p4d = p4d_offset(init_pgd, TASK_SIZE);
init_pud = pud_offset(init_p4d, TASK_SIZE);
init_pmd = pmd_offset(init_pud, TASK_SIZE);
new_pmd = pmd_offset(new_pud, TASK_SIZE);
memcpy(new_pmd, init_pmd,
(pmd_index(MODULES_VADDR) - pmd_index(TASK_SIZE))
* sizeof(pmd_t));
clean_dcache_area(new_pmd, PTRS_PER_PMD * sizeof(pmd_t));
#endif /* CONFIG_KASAN */
#endif /* CONFIG_LPAE */
if (!vectors_high()) {
/*
* On ARM, first page must always be allocated since it
* contains the machine vectors. The vectors are always high
* with LPAE.
*/
new_p4d = p4d_alloc(mm, new_pgd, 0);
if (!new_p4d)
goto no_p4d;
new_pud = pud_alloc(mm, new_p4d, 0);
if (!new_pud)
goto no_pud;
new_pmd = pmd_alloc(mm, new_pud, 0);
if (!new_pmd)
goto no_pmd;
new_pte = pte_alloc_map(mm, new_pmd, 0);
if (!new_pte)
goto no_pte;
#ifndef CONFIG_ARM_LPAE
/*
* Modify the PTE pointer to have the correct domain. This
* needs to be the vectors domain to avoid the low vectors
* being unmapped.
*/
pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif
init_p4d = p4d_offset(init_pgd, 0);
init_pud = pud_offset(init_p4d, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
set_pte_ext(new_pte + 0, init_pte[0], 0);
set_pte_ext(new_pte + 1, init_pte[1], 0);
pte_unmap(init_pte);
pte_unmap(new_pte);
}
return new_pgd;
no_pte:
pmd_free(mm, new_pmd);
mm_dec_nr_pmds(mm);
no_pmd:
pud_free(mm, new_pud);
no_pud:
p4d_free(mm, new_p4d);
no_p4d:
__pgd_free(new_pgd);
no_pgd:
return NULL;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgtable_t pte;
if (!pgd_base)
return;
pgd = pgd_base + pgd_index(0);
if (pgd_none_or_clear_bad(pgd))
goto no_pgd;
p4d = p4d_offset(pgd, 0);
if (p4d_none_or_clear_bad(p4d))
goto no_p4d;
pud = pud_offset(p4d, 0);
if (pud_none_or_clear_bad(pud))
goto no_pud;
pmd = pmd_offset(pud, 0);
if (pmd_none_or_clear_bad(pmd))
goto no_pmd;
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
mm_dec_nr_ptes(mm);
no_pmd:
pud_clear(pud);
pmd_free(mm, pmd);
mm_dec_nr_pmds(mm);
no_pud:
p4d_clear(p4d);
pud_free(mm, pud);
no_p4d:
pgd_clear(pgd);
p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
/*
* Free modules/pkmap or identity pmd tables.
*/
for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
if (pgd_none_or_clear_bad(pgd))
continue;
if (pgd_val(*pgd) & L_PGD_SWAPPER)
continue;
p4d = p4d_offset(pgd, 0);
if (p4d_none_or_clear_bad(p4d))
continue;
pud = pud_offset(p4d, 0);
if (pud_none_or_clear_bad(pud))
continue;
pmd = pmd_offset(pud, 0);
pud_clear(pud);
pmd_free(mm, pmd);
mm_dec_nr_pmds(mm);
p4d_clear(p4d);
pud_free(mm, pud);
mm_dec_nr_puds(mm);
pgd_clear(pgd);
p4d_free(mm, p4d);
}
#endif
__pgd_free(pgd_base);
}
| linux-master | arch/arm/mm/pgd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
struct page_change_data {
pgprot_t set_mask;
pgprot_t clear_mask;
};
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
pte_t pte = *ptep;
pte = clear_pte_bit(pte, cdata->clear_mask);
pte = set_pte_bit(pte, cdata->set_mask);
set_pte_ext(ptep, pte, 0);
return 0;
}
static bool range_in_range(unsigned long start, unsigned long size,
unsigned long range_start, unsigned long range_end)
{
return start >= range_start && start < range_end &&
size <= range_end - start;
}
/*
* This function assumes that the range is mapped with PAGE_SIZE pages.
*/
static int __change_memory_common(unsigned long start, unsigned long size,
pgprot_t set_mask, pgprot_t clear_mask)
{
struct page_change_data data;
int ret;
data.set_mask = set_mask;
data.clear_mask = clear_mask;
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
flush_tlb_kernel_range(start, start + size);
return ret;
}
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr & PAGE_MASK;
unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
unsigned long size = end - start;
WARN_ON_ONCE(start != addr);
if (!size)
return 0;
if (!range_in_range(start, size, MODULES_VADDR, MODULES_END) &&
!range_in_range(start, size, VMALLOC_START, VMALLOC_END))
return -EINVAL;
return __change_memory_common(start, size, set_mask, clear_mask);
}
int set_memory_ro(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(L_PTE_RDONLY),
__pgprot(0));
}
int set_memory_rw(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(0),
__pgprot(L_PTE_RDONLY));
}
int set_memory_nx(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(L_PTE_XN),
__pgprot(0));
}
int set_memory_x(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(0),
__pgprot(L_PTE_XN));
}
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
if (enable)
return __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(L_PTE_VALID),
__pgprot(0));
else
return __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(0),
__pgprot(L_PTE_VALID));
}
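/*
* Illustrative sketch (not part of the original file): temporarily
* write-protecting a single page with the helpers above. The guard macro
* and function name are hypothetical; addr must be page aligned and lie
* in the modules or vmalloc range, otherwise change_memory_common()
* returns -EINVAL.
*/
#ifdef EXAMPLE_SET_MEMORY_SKETCH
static int example_seal_page(unsigned long addr)
{
int ret = set_memory_ro(addr, 1); /* one page */
if (ret)
return ret;
/* ... read-only phase ... */
return set_memory_rw(addr, 1); /* writable again */
}
#endif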
| linux-master | arch/arm/mm/pageattr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
*
* Copyright (C) 2007 ARM Limited
*/
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#define CR_L2 (1 << 26)
#define CACHE_LINE_SIZE 32
#define CACHE_LINE_SHIFT 5
#define CACHE_WAY_PER_SET 8
#define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
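/*
* L2CTYPE[11:8] encodes the way size as 8KiB << n, which CACHE_WAY_SIZE()
* recovers directly; dividing by the 32-byte line size then gives the
* number of sets per way used by the set/way loops below.
*/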
static inline int xsc3_l2_present(void)
{
unsigned long l2ctype;
__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
return !!(l2ctype & 0xf8);
}
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}
static inline void xsc3_l2_inv_all(void)
{
unsigned long l2ctype, set_way;
int set, way;
__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
for (way = 0; way < CACHE_WAY_PER_SET; way++) {
set_way = (way << 29) | (set << 5);
__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
}
}
dsb();
}
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
if (va != -1)
kunmap_atomic((void *)va);
#endif
}
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
unsigned long va = prev_va & PAGE_MASK;
unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
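/*
* Callers walk physical addresses in increasing order, so the page
* offset (held in the top bits by the shift above) only wraps to a
* smaller value when we cross into a new page; the comparison below
* detects exactly that.
*/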
if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
/*
* Switching to a new page. Because cache ops are
* using virtual addresses only, we must put a mapping
* in place for it.
*/
l2_unmap_va(prev_va);
va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
}
return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
return __phys_to_virt(pa);
#endif
}
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_inv_all();
return;
}
vaddr = -1; /* to force the first mapping */
/*
* Clean and invalidate partial first cache line.
*/
if (start & (CACHE_LINE_SIZE - 1)) {
vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start = (start | (CACHE_LINE_SIZE - 1)) + 1;
}
/*
* Invalidate all full cache lines between 'start' and 'end'.
*/
while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
vaddr = l2_map_va(start, vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
/*
* Clean and invalidate partial last cache line.
*/
if (start < end) {
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
}
l2_unmap_va(vaddr);
dsb();
}
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
unsigned long vaddr;
vaddr = -1; /* to force the first mapping */
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
start += CACHE_LINE_SIZE;
}
l2_unmap_va(vaddr);
dsb();
}
/*
* optimize L2 flush all operation by set/way format
*/
static inline void xsc3_l2_flush_all(void)
{
unsigned long l2ctype, set_way;
int set, way;
__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
for (way = 0; way < CACHE_WAY_PER_SET; way++) {
set_way = (way << 29) | (set << 5);
__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
}
}
dsb();
}
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_flush_all();
return;
}
vaddr = -1; /* to force the first mapping */
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
l2_unmap_va(vaddr);
dsb();
}
static int __init xsc3_l2_init(void)
{
if (!cpu_is_xsc3() || !xsc3_l2_present())
return 0;
if (get_cr() & CR_L2) {
pr_info("XScale3 L2 cache enabled.\n");
xsc3_l2_inv_all();
outer_cache.inv_range = xsc3_l2_inv_range;
outer_cache.clean_range = xsc3_l2_clean_range;
outer_cache.flush_range = xsc3_l2_flush_range;
}
return 0;
}
core_initcall(xsc3_l2_init);
| linux-master | arch/arm/mm/cache-xsc3l2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mm/hugetlbpage.c
*
* Copyright (C) 2012 ARM Ltd.
*
* Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
*/
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
* On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
* of type casting from pmd_t * to pte_t *.
*/
int pud_huge(pud_t pud)
{
return 0;
}
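/*
* A non-empty pmd with the TABLE bit clear is a section (huge) mapping
* rather than a pointer to a pte table, which is what pmd_huge() below
* tests for.
*/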
int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
| linux-master | arch/arm/mm/hugetlbpage.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
*
* Copyright (C) 2008 Marvell Semiconductor
*
* References:
* - Unified Layer 2 Cache for Feroceon CPU Cores,
* Document ID MV-S104858-00, Rev. A, October 23 2007.
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/hardware/cache-feroceon-l2.h>
#define L2_WRITETHROUGH_KIRKWOOD BIT(4)
/*
* Low-level cache maintenance operations.
*
* As well as the regular 'clean/invalidate/flush L2 cache line by
* MVA' instructions, the Feroceon L2 cache controller also features
* 'clean/invalidate L2 range by MVA' operations.
*
* Cache range operations are initiated by writing the start and
* end addresses to successive cp15 registers, and process every
* cache line whose first byte address lies in the inclusive range
* [start:end].
*
* The cache range operations stall the CPU pipeline until completion.
*
* The range operations require two successive cp15 writes, in
* between which we don't want to be preempted.
*/
static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
/*
* Because range ops can't be done on physical addresses,
* we simply install a virtual mapping for the page, solely so
* that a TLB lookup can occur; hence no need to flush the untouched
* memory mapping afterwards (note: a cache flush may happen
* in some circumstances depending on the path taken in kunmap_atomic).
*/
void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
return __phys_to_virt(paddr);
#endif
}
static inline void l2_put_va(unsigned long vaddr)
{
#ifdef CONFIG_HIGHMEM
kunmap_atomic((void *)vaddr);
#endif
}
static inline void l2_clean_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
}
static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
unsigned long va_start, va_end, flags;
/*
* Make sure 'start' and 'end' reference the same page, as
* L2 is PIPT and range operations only do a TLB lookup on
* the start address.
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
va_start = l2_get_va(start);
va_end = va_start + (end - start);
raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
"mcr p15, 1, %1, c15, c9, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
l2_put_va(va_start);
}
static inline void l2_clean_inv_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
}
static inline void l2_inv_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
}
static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
unsigned long va_start, va_end, flags;
/*
* Make sure 'start' and 'end' reference the same page, as
* L2 is PIPT and range operations only do a TLB lookup on
* the start address.
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
va_start = l2_get_va(start);
va_end = va_start + (end - start);
raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
"mcr p15, 1, %1, c15, c11, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
l2_put_va(va_start);
}
static inline void l2_inv_all(void)
{
__asm__("mcr p15, 1, %0, c15, c11, 0" : : "r" (0));
}
/*
* Linux primitives.
*
* Note that the end addresses passed to Linux primitives are
* noninclusive, while the hardware cache range operations use
* inclusive start and end addresses.
*/
#define CACHE_LINE_SIZE 32
#define MAX_RANGE_SIZE 1024
static int l2_wt_override;
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
unsigned long range_end;
BUG_ON(start & (CACHE_LINE_SIZE - 1));
BUG_ON(end & (CACHE_LINE_SIZE - 1));
/*
* Try to process all cache lines between 'start' and 'end'.
*/
range_end = end;
/*
* Limit the number of cache lines processed at once,
* since cache range operations stall the CPU pipeline
* until completion.
*/
if (range_end > start + MAX_RANGE_SIZE)
range_end = start + MAX_RANGE_SIZE;
/*
* Cache range operations can't straddle a page boundary.
*/
if (range_end > (start | (PAGE_SIZE - 1)) + 1)
range_end = (start | (PAGE_SIZE - 1)) + 1;
return range_end;
}
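/*
* Worked example for calc_range_end() above (illustrative, 4K pages):
* for start 0x100e00 and end 0x102000 the first call returns 0x101000
* (clipped at the page boundary), after which successive calls advance
* in MAX_RANGE_SIZE (1KiB, i.e. 32 line) steps: 0x101400, 0x101800,
* 0x101c00, 0x102000.
*/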
static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
{
/*
* Clean and invalidate partial first cache line.
*/
if (start & (CACHE_LINE_SIZE - 1)) {
l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
start = (start | (CACHE_LINE_SIZE - 1)) + 1;
}
/*
* Clean and invalidate partial last cache line.
*/
if (start < end && end & (CACHE_LINE_SIZE - 1)) {
l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
end &= ~(CACHE_LINE_SIZE - 1);
}
/*
* Invalidate all full cache lines between 'start' and 'end'.
*/
while (start < end) {
unsigned long range_end = calc_range_end(start, end);
l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
start = range_end;
}
dsb();
}
static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
{
/*
* If L2 is forced to WT, the L2 will always be clean and we
* don't need to do anything here.
*/
if (!l2_wt_override) {
start &= ~(CACHE_LINE_SIZE - 1);
end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
while (start != end) {
unsigned long range_end = calc_range_end(start, end);
l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
start = range_end;
}
}
dsb();
}
static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
{
start &= ~(CACHE_LINE_SIZE - 1);
end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
while (start != end) {
unsigned long range_end = calc_range_end(start, end);
if (!l2_wt_override)
l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
start = range_end;
}
dsb();
}
/*
* Routines to disable and re-enable the D-cache and I-cache at run
* time. These are necessary because the L2 cache can only be enabled
* or disabled while the L1 Dcache and Icache are both disabled.
*/
static int __init flush_and_disable_dcache(void)
{
u32 cr;
cr = get_cr();
if (cr & CR_C) {
unsigned long flags;
raw_local_irq_save(flags);
flush_cache_all();
set_cr(cr & ~CR_C);
raw_local_irq_restore(flags);
return 1;
}
return 0;
}
static void __init enable_dcache(void)
{
u32 cr;
cr = get_cr();
set_cr(cr | CR_C);
}
static void __init __invalidate_icache(void)
{
__asm__("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
}
static int __init invalidate_and_disable_icache(void)
{
u32 cr;
cr = get_cr();
if (cr & CR_I) {
set_cr(cr & ~CR_I);
__invalidate_icache();
return 1;
}
return 0;
}
static void __init enable_icache(void)
{
u32 cr;
cr = get_cr();
set_cr(cr | CR_I);
}
static inline u32 read_extra_features(void)
{
u32 u;
__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
return u;
}
static inline void write_extra_features(u32 u)
{
__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
static void __init disable_l2_prefetch(void)
{
u32 u;
/*
* Read the CPU Extra Features register and make sure that the
* Disable L2 Prefetch bit is set, setting it if necessary.
*/
u = read_extra_features();
if (!(u & 0x01000000)) {
pr_info("Feroceon L2: Disabling L2 prefetch.\n");
write_extra_features(u | 0x01000000);
}
}
static void __init enable_l2(void)
{
u32 u;
u = read_extra_features();
if (!(u & 0x00400000)) {
int i, d;
pr_info("Feroceon L2: Enabling L2\n");
d = flush_and_disable_dcache();
i = invalidate_and_disable_icache();
l2_inv_all();
write_extra_features(u | 0x00400000);
if (i)
enable_icache();
if (d)
enable_dcache();
} else
pr_err(FW_BUG
"Feroceon L2: bootloader left the L2 cache on!\n");
}
void __init feroceon_l2_init(int __l2_wt_override)
{
l2_wt_override = __l2_wt_override;
disable_l2_prefetch();
outer_cache.inv_range = feroceon_l2_inv_range;
outer_cache.clean_range = feroceon_l2_clean_range;
outer_cache.flush_range = feroceon_l2_flush_range;
enable_l2();
pr_info("Feroceon L2: Cache support initialised%s.\n",
l2_wt_override ? ", in WT override mode" : "");
}
#ifdef CONFIG_OF
static const struct of_device_id feroceon_ids[] __initconst = {
{ .compatible = "marvell,kirkwood-cache"},
{ .compatible = "marvell,feroceon-cache"},
{}
};
int __init feroceon_of_init(void)
{
struct device_node *node;
void __iomem *base;
bool l2_wt_override = false;
#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
l2_wt_override = true;
#endif
node = of_find_matching_node(NULL, feroceon_ids);
if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
base = of_iomap(node, 0);
if (!base)
return -ENOMEM;
if (l2_wt_override)
writel(readl(base) | L2_WRITETHROUGH_KIRKWOOD, base);
else
writel(readl(base) & ~L2_WRITETHROUGH_KIRKWOOD, base);
}
feroceon_l2_init(l2_wt_override);
return 0;
}
#endif
| linux-master | arch/arm/mm/cache-feroceon-l2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/copypage-xsc3.c
*
* Copyright (C) 2004 Intel Corp.
*
* Adapted for 3rd gen XScale core, no more mini-dcache
* Author: Matt Gilbert ([email protected])
*/
#include <linux/init.h>
#include <linux/highmem.h>
/*
* General note:
* We don't really want write-allocate cache behaviour for these functions
* since that will just eat through 8K of the cache.
*/
/*
* XSC3 optimised copy_user_highpage
*
* The source page may have some clean entries in the cache already, but we
* can safely ignore them - break_cow() will flush them out of the cache
* if we eventually end up using our copied page.
*
*/
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
int tmp;
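/*
* The loop below copies 64 bytes per pass: PAGE_SIZE/64 - 1 passes run
* through label 1 issuing preloads ahead of the copy, and the final
* pass branches to label 2 so that no preload is issued past the end
* of the source page.
*/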
asm volatile ("\
.arch xscale \n\
pld [%1, #0] \n\
pld [%1, #32] \n\
1: pld [%1, #64] \n\
pld [%1, #96] \n\
\n\
2: ldrd r2, r3, [%1], #8 \n\
ldrd r4, r5, [%1], #8 \n\
mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\
strd r2, r3, [%0], #8 \n\
ldrd r2, r3, [%1], #8 \n\
strd r4, r5, [%0], #8 \n\
ldrd r4, r5, [%1], #8 \n\
strd r2, r3, [%0], #8 \n\
strd r4, r5, [%0], #8 \n\
ldrd r2, r3, [%1], #8 \n\
ldrd r4, r5, [%1], #8 \n\
mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\
strd r2, r3, [%0], #8 \n\
ldrd r2, r3, [%1], #8 \n\
subs %2, %2, #1 \n\
strd r4, r5, [%0], #8 \n\
ldrd r4, r5, [%1], #8 \n\
strd r2, r3, [%0], #8 \n\
strd r4, r5, [%0], #8 \n\
bgt 1b \n\
beq 2b "
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
: "2" (PAGE_SIZE / 64 - 1)
: "r2", "r3", "r4", "r5");
}
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to);
kfrom = kmap_atomic(from);
flush_cache_page(vma, vaddr, page_to_pfn(from));
xsc3_mc_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom);
kunmap_atomic(kto);
}
/*
* XScale optimised clear_user_page
*/
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile ("\
.arch xscale \n\
mov r1, %2 \n\
mov r2, #0 \n\
mov r3, #0 \n\
1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\
strd r2, r3, [%0], #8 \n\
strd r2, r3, [%0], #8 \n\
strd r2, r3, [%0], #8 \n\
strd r2, r3, [%0], #8 \n\
subs r1, r1, #1 \n\
bne 1b"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3");
kunmap_atomic(kaddr);
}
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
.cpu_copy_user_highpage = xsc3_mc_copy_user_highpage,
};
| linux-master | arch/arm/mm/copypage-xsc3.c |
/*
* Based on linux/arch/arm/mm/nommu.c
*
* ARM PMSAv7 supporting functions.
*/
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/sections.h>
#include "mm.h"
struct region {
phys_addr_t base;
phys_addr_t size;
unsigned long subreg;
};
static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif
static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;
static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);
#ifndef CONFIG_CPU_V7M
#define DRBAR __ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR __ACCESS_CP15(c6, 0, c1, 1)
#define DRSR __ACCESS_CP15(c6, 0, c1, 2)
#define IRSR __ACCESS_CP15(c6, 0, c1, 3)
#define DRACR __ACCESS_CP15(c6, 0, c1, 4)
#define IRACR __ACCESS_CP15(c6, 0, c1, 5)
#define RNGNR __ACCESS_CP15(c6, 0, c2, 0)
/* Region number */
static inline void rgnr_write(u32 v)
{
write_sysreg(v, RNGNR);
}
/* Data-side / unified region attributes */
/* Region access control register */
static inline void dracr_write(u32 v)
{
write_sysreg(v, DRACR);
}
/* Region size register */
static inline void drsr_write(u32 v)
{
write_sysreg(v, DRSR);
}
/* Region base address register */
static inline void drbar_write(u32 v)
{
write_sysreg(v, DRBAR);
}
static inline u32 drbar_read(void)
{
return read_sysreg(DRBAR);
}
/* Optional instruction-side region attributes */
/* I-side Region access control register */
static inline void iracr_write(u32 v)
{
write_sysreg(v, IRACR);
}
/* I-side Region size register */
static inline void irsr_write(u32 v)
{
write_sysreg(v, IRSR);
}
/* I-side Region base address register */
static inline void irbar_write(u32 v)
{
write_sysreg(v, IRBAR);
}
static inline u32 irbar_read(void)
{
return read_sysreg(IRBAR);
}
#else
static inline void rgnr_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
}
/* Data-side / unified region attributes */
/* Region access control register */
static inline void dracr_write(u32 v)
{
u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);
writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}
/* Region size register */
static inline void drsr_write(u32 v)
{
u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);
writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}
/* Region base address register */
static inline void drbar_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
}
static inline u32 drbar_read(void)
{
return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
}
/* ARMv7-M only supports a unified MPU, so I-side operations are nop */
static inline void iracr_write(u32 v) {}
static inline void irsr_write(u32 v) {}
static inline void irbar_write(u32 v) {}
static inline unsigned long irbar_read(void) {return 0;}
#endif
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
unsigned long subreg, bslots, sslots;
phys_addr_t abase = base & ~(size - 1);
phys_addr_t asize = base + size - abase;
phys_addr_t p2size = 1 << __fls(asize);
phys_addr_t bdiff, sdiff;
if (p2size != asize)
p2size *= 2;
bdiff = base - abase;
sdiff = p2size - asize;
subreg = p2size / PMSAv7_NR_SUBREGS;
if ((bdiff % subreg) || (sdiff % subreg))
return false;
bslots = bdiff / subreg;
sslots = sdiff / subreg;
if (bslots || sslots) {
int i;
if (subreg < PMSAv7_MIN_SUBREG_SIZE)
return false;
if (bslots + sslots > PMSAv7_NR_SUBREGS)
return false;
for (i = 0; i < bslots; i++)
_set_bit(i, ®ion->subreg);
for (i = 1; i <= sslots; i++)
_set_bit(PMSAv7_NR_SUBREGS - i, ®ion->subreg);
}
region->base = abase;
region->size = p2size;
return true;
}
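/*
* Worked example for try_split_region() above (illustrative): covering
* base 0x10000000, size 0x0c000000 (192MiB) yields an aligned 256MiB
* region at 0x10000000 with the top two subregions (64MiB) disabled,
* since each of the eight subregions spans 32MiB.
*/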
static int __init allocate_region(phys_addr_t base, phys_addr_t size,
unsigned int limit, struct region *regions)
{
int count = 0;
phys_addr_t diff = size;
int attempts = MPU_MAX_REGIONS;
while (diff) {
/* Try cover region as is (maybe with help of subregions) */
if (try_split_region(base, size, ®ions[count])) {
count++;
base += size;
diff -= size;
size = diff;
} else {
/*
* Maximum aligned region might overflow phys_addr_t
* if "base" is 0. Hence we keep everything below 4G
* until we take the smaller of the aligned region
* size ("asize") and rounded region size ("p2size"),
* one of which is guaranteed to be smaller than the
* maximum physical address.
*/
phys_addr_t asize = (base - 1) ^ base;
phys_addr_t p2size = (1 << __fls(diff)) - 1;
size = asize < p2size ? asize + 1 : p2size + 1;
}
if (count > limit)
break;
if (!attempts)
break;
attempts--;
}
return count;
}
/* MPU initialisation functions */
void __init pmsav7_adjust_lowmem_bounds(void)
{
phys_addr_t specified_mem_size = 0, total_mem_size = 0;
phys_addr_t mem_start;
phys_addr_t mem_end;
phys_addr_t reg_start, reg_end;
unsigned int mem_max_regions;
bool first = true;
int num;
u64 i;
/* Probe with, and then free up, PMSAv7_PROBE_REGION */
mpu_min_region_order = __mpu_min_region_order();
/* How many regions are supported */
mpu_max_regions = __mpu_max_regions();
mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);
/* We need to keep one slot for background region */
mem_max_regions--;
#ifndef CONFIG_CPU_V7M
/* ... and one for vectors */
mem_max_regions--;
#endif
#ifdef CONFIG_XIP_KERNEL
/* plus some regions to cover XIP ROM */
num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
mem_max_regions, xip);
mem_max_regions -= num;
#endif
for_each_mem_range(i, ®_start, ®_end) {
if (first) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
* Initially only use memory contiguous from
* PHYS_OFFSET */
if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
mem_start = reg_start;
mem_end = reg_end;
specified_mem_size = mem_end - mem_start;
first = false;
} else {
/*
* memblock auto merges contiguous blocks, remove
* all blocks afterwards in one go (we can't remove
* blocks separately while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
&mem_end, ®_start);
memblock_remove(reg_start, 0 - reg_start);
break;
}
}
memset(mem, 0, sizeof(mem));
num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
for (i = 0; i < num; i++) {
unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;
total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);
pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
&mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
}
if (total_mem_size != specified_mem_size) {
pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
&specified_mem_size, &total_mem_size);
memblock_remove(mem_start + total_mem_size,
specified_mem_size - total_mem_size);
}
}
static int __init __mpu_max_regions(void)
{
/*
* We don't support a different number of I/D side regions, so if
* we have separate instruction and data memory maps, return
* whichever side supports the smaller number of regions.
*/
u32 dregions, iregions, mpuir;
mpuir = read_cpuid_mputype();
dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
/* Check for separate d-side and i-side memory maps */
if (mpuir & MPUIR_nU)
iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;
/* Use the smallest of the two maxima */
return min(dregions, iregions);
}
static int __init mpu_iside_independent(void)
{
/* MPUIR.nU specifies whether there is *not* a unified memory map */
return read_cpuid_mputype() & MPUIR_nU;
}
static int __init __mpu_min_region_order(void)
{
u32 drbar_result, irbar_result;
/* We've kept a region free for this probing */
rgnr_write(PMSAv7_PROBE_REGION);
isb();
/*
* As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
* region order
*/
drbar_write(0xFFFFFFFC);
drbar_result = irbar_result = drbar_read();
drbar_write(0x0);
/* If the MPU is non-unified, we use the larger of the two minima */
if (mpu_iside_independent()) {
irbar_write(0xFFFFFFFC);
irbar_result = irbar_read();
irbar_write(0x0);
}
isb(); /* Ensure that MPU region operations have completed */
/* Return whichever result is larger */
return __ffs(max(drbar_result, irbar_result));
}
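/*
* Probe rationale (illustrative): DRBAR only implements the address
* bits that the minimum region alignment allows, so unimplemented low
* bits of the 0xFFFFFFFC write read back as zero and __ffs() of the
* read-back value yields the supported order. A read-back of
* 0xFFFFFF00, for example, means 256-byte (order 8) regions.
*/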
static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
unsigned int size_order, unsigned int properties,
unsigned int subregions, bool need_flush)
{
u32 size_data;
/* We kept a region free for probing the resolution of MPU regions */
if (number > mpu_max_regions
|| number >= MPU_MAX_REGIONS)
return -ENOENT;
if (size_order > 32)
return -ENOMEM;
if (size_order < mpu_min_region_order)
return -ENOMEM;
/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N+1) */
size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
size_data |= subregions << PMSAv7_RSR_SD;
if (need_flush)
flush_cache_all();
dsb(); /* Ensure all previous data accesses occur with old mappings */
rgnr_write(number);
isb();
drbar_write(start);
dracr_write(properties);
isb(); /* Propagate properties before enabling region */
drsr_write(size_data);
/* Check for independent I-side registers */
if (mpu_iside_independent()) {
irbar_write(start);
iracr_write(properties);
isb();
irsr_write(size_data);
}
isb();
/* Store region info (we treat i/d side the same, so only store d) */
mpu_rgn_info.rgns[number].dracr = properties;
mpu_rgn_info.rgns[number].drbar = start;
mpu_rgn_info.rgns[number].drsr = size_data;
mpu_rgn_info.used++;
return 0;
}
/*
* Set up default MPU regions, doing nothing if there is no MPU
*/
void __init pmsav7_setup(void)
{
int i, region = 0, err = 0;
/* Setup MPU (order is important) */
/* Background */
err |= mpu_setup_region(region++, 0, 32,
PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
0, false);
#ifdef CONFIG_XIP_KERNEL
/* ROM */
for (i = 0; i < ARRAY_SIZE(xip); i++) {
/*
* In case we overwrite the RAM region we set up earlier in
* head-nommu.S (which is cacheable), all subsequent data
* accesses until we set up RAM below would go through the
* background region (which is uncacheable), so we need to
* clean and invalidate the cache.
*/
bool need_flush = region == PMSAv7_RAM_REGION;
if (!xip[i].size)
continue;
err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
xip[i].subreg, need_flush);
}
#endif
/* RAM */
for (i = 0; i < ARRAY_SIZE(mem); i++) {
if (!mem[i].size)
continue;
err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
mem[i].subreg, false);
}
/* Vectors */
#ifndef CONFIG_CPU_V7M
err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
0, false);
#endif
if (err) {
panic("MPU region initialization failure! %d", err);
} else {
pr_info("Using ARMv7 PMSA Compliant MPU. "
"Region independence: %s, Used %d of %d regions\n",
mpu_iside_independent() ? "Yes" : "No",
mpu_rgn_info.used, mpu_max_regions);
}
}
| linux-master | arch/arm/mm/pmsa-v7.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file contains kasan initialization code for ARM.
*
* Copyright (c) 2018 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <[email protected]>
* Author: Linus Walleij <[email protected]>
*/
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/proc-fns.h>
#include "mm.h"
static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
static __init void *kasan_alloc_block(size_t size)
{
return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
}
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
unsigned long end, bool early)
{
unsigned long next;
pte_t *ptep = pte_offset_kernel(pmdp, addr);
do {
pte_t entry;
void *p;
next = addr + PAGE_SIZE;
if (!early) {
if (!pte_none(READ_ONCE(*ptep)))
continue;
p = kasan_alloc_block(PAGE_SIZE);
if (!p) {
panic("%s failed to allocate shadow page for address 0x%lx\n",
__func__, addr);
return;
}
memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
entry = pfn_pte(virt_to_pfn(p),
__pgprot(pgprot_val(PAGE_KERNEL)));
} else if (pte_none(READ_ONCE(*ptep))) {
/*
* The early shadow memory maps all KASan shadow
* accesses to one and the same page in memory,
* "kasan_early_shadow_page", so that the instrumentation
* will work on a scratch area until we can set up the
* proper KASan shadow memory.
*/
entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page),
__pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN));
} else {
/*
* Early shadow mappings are PMD_SIZE aligned, so if the
* first entry is already set, they must all be set.
*/
return;
}
set_pte_at(&init_mm, addr, ptep, entry);
} while (ptep++, addr = next, addr != end);
}
/*
* The pmd (page middle directory) is only used on LPAE
*/
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
unsigned long end, bool early)
{
unsigned long next;
pmd_t *pmdp = pmd_offset(pudp, addr);
do {
if (pmd_none(*pmdp)) {
/*
* We attempt to allocate a shadow block for the PMDs
* used by the PTEs for this address if it isn't already
* allocated.
*/
void *p = early ? kasan_early_shadow_pte :
kasan_alloc_block(PAGE_SIZE);
if (!p) {
panic("%s failed to allocate shadow block for address 0x%lx\n",
__func__, addr);
return;
}
pmd_populate_kernel(&init_mm, pmdp, p);
flush_pmd_entry(pmdp);
}
next = pmd_addr_end(addr, end);
kasan_pte_populate(pmdp, addr, next, early);
} while (pmdp++, addr = next, addr != end);
}
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
bool early)
{
unsigned long next;
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pgdp = pgd_offset_k(addr);
do {
/*
* Allocate and populate the shadow block of p4d folded into
* pud folded into pmd if it doesn't already exist
*/
if (!early && pgd_none(*pgdp)) {
void *p = kasan_alloc_block(PAGE_SIZE);
if (!p) {
panic("%s failed to allocate shadow block for address 0x%lx\n",
__func__, addr);
return;
}
pgd_populate(&init_mm, pgdp, p);
}
next = pgd_addr_end(addr, end);
/*
* We just immediately jump over the p4d and pud page
* directories since we believe ARM32 will never gain four
* nor five level page tables.
*/
p4dp = p4d_offset(pgdp, addr);
pudp = pud_offset(p4dp, addr);
kasan_pmd_populate(pudp, addr, next, early);
} while (pgdp++, addr = next, addr != end);
}
extern struct proc_info_list *lookup_processor_type(unsigned int);
void __init kasan_early_init(void)
{
struct proc_info_list *list;
/*
* locate processor in the list of supported processor
* types. The linker builds this table for us from the
* entries in arch/arm/mm/proc-*.S
*/
list = lookup_processor_type(read_cpuid_id());
if (list) {
#ifdef MULTI_CPU
processor = *list->proc;
#endif
}
BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET);
/*
* We walk the page table and set all of the shadow memory to point
* to the scratch page.
*/
kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, true);
}
static void __init clear_pgds(unsigned long start,
unsigned long end)
{
for (; start && start < end; start += PMD_SIZE)
pmd_clear(pmd_off_k(start));
}
static int __init create_mapping(void *start, void *end)
{
void *shadow_start, *shadow_end;
shadow_start = kasan_mem_to_shadow(start);
shadow_end = kasan_mem_to_shadow(end);
pr_info("Mapping kernel virtual memory block: %px-%px at shadow: %px-%px\n",
start, end, shadow_start, shadow_end);
kasan_pgd_populate((unsigned long)shadow_start & PAGE_MASK,
PAGE_ALIGN((unsigned long)shadow_end), false);
return 0;
}
void __init kasan_init(void)
{
phys_addr_t pa_start, pa_end;
u64 i;
/*
* We are going to perform proper setup of shadow memory.
*
* First we should unmap the early shadow (the clear_pgds() call below).
* However, instrumented code can't execute without shadow memory.
*
* To keep the early shadow memory MMU tables around while setting up
* the proper shadow memory, we copy swapper_pg_dir (the initial page
* table) to tmp_pgd_table and use that to keep the early shadow memory
* mapped until the full shadow setup is finished. Then we swap back
* to the proper swapper_pg_dir.
*/
memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
#ifdef CONFIG_ARM_LPAE
/* We need to be in the same PGD or this won't work */
BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
pgd_index(KASAN_SHADOW_END));
memcpy(tmp_pmd_table,
(void*)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
sizeof(tmp_pmd_table));
set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
__pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
#endif
cpu_switch_mm(tmp_pgd_table, &init_mm);
local_flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
kasan_mem_to_shadow((void *)VMALLOC_END));
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END),
kasan_mem_to_shadow((void *)-1UL) + 1);
for_each_mem_range(i, &pa_start, &pa_end) {
void *start = __va(pa_start);
void *end = __va(pa_end);
/* Do not attempt to shadow highmem */
if (pa_start >= arm_lowmem_limit) {
pr_info("Skip highmem block at %pa-%pa\n", &pa_start, &pa_end);
continue;
}
if (pa_end > arm_lowmem_limit) {
pr_info("Truncating shadow for memory block at %pa-%pa to lowmem region at %pa\n",
&pa_start, &pa_end, &arm_lowmem_limit);
end = __va(arm_lowmem_limit);
}
if (start >= end) {
pr_info("Skipping invalid memory block %pa-%pa (virtual %p-%p)\n",
&pa_start, &pa_end, start, end);
continue;
}
create_mapping(start, end);
}
/*
* 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
* so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
* VMALLOC support KASAN will manage this region dynamically,
* refer to kasan_populate_vmalloc() and ARM's implementation of
* module_alloc().
* 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
* ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't
* use kasan_populate_zero_shadow.
*/
if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));
/*
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so
* we should make sure that it maps the zero page read-only.
*/
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
&kasan_early_shadow_pte[i],
pfn_pte(virt_to_pfn(kasan_early_shadow_page),
__pgprot(pgprot_val(PAGE_KERNEL)
| L_PTE_RDONLY)));
cpu_switch_mm(swapper_pg_dir, &init_mm);
local_flush_tlb_all();
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
pr_info("Kernel address sanitizer initialized\n");
init_task.kasan_depth = 0;
}
| linux-master | arch/arm/mm/kasan_init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/flush.c
*
* Copyright (C) 1995-2002 Russell King
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>
#include "mm.h"
#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
if (outer_cache.sync)
outer_cache.sync();
#endif
if (soc_mb)
soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif
#ifdef CONFIG_CPU_CACHE_VIPT
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
const int zero = 0;
set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
asm( "mcrr p15, 0, %1, %0, c14\n"
" mcr p15, 0, %2, c7, c10, 4"
:
: "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
: "cc");
}
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
unsigned long offset = vaddr & (PAGE_SIZE - 1);
unsigned long to;
set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
to = va + offset;
flush_icache_range(to, to + len);
}
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
vivt_flush_cache_mm(mm);
return;
}
if (cache_is_vipt_aliasing()) {
asm( "mcr p15, 0, %0, c7, c14, 0\n"
" mcr p15, 0, %0, c7, c10, 4"
:
: "r" (0)
: "cc");
}
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cache_is_vivt()) {
vivt_flush_cache_range(vma, start, end);
return;
}
if (cache_is_vipt_aliasing()) {
asm( "mcr p15, 0, %0, c7, c14, 0\n"
" mcr p15, 0, %0, c7, c10, 4"
:
: "r" (0)
: "cc");
}
if (vma->vm_flags & VM_EXEC)
__flush_icache_all();
}
void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
if (cache_is_vivt()) {
vivt_flush_cache_pages(vma, user_addr, pfn, nr);
return;
}
if (cache_is_vipt_aliasing()) {
flush_pfn_alias(pfn, user_addr);
__flush_icache_all();
}
if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#define flush_icache_alias(pfn,vaddr,len) do { } while (0)
#endif
#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2
static void flush_ptrace_access_other(void *args)
{
__flush_icache_all();
}
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
unsigned long len, unsigned int flags)
{
if (cache_is_vivt()) {
if (flags & FLAG_PA_CORE_IN_MM) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
return;
}
if (cache_is_vipt_aliasing()) {
flush_pfn_alias(page_to_pfn(page), uaddr);
__flush_icache_all();
return;
}
/* VIPT non-aliasing D-cache */
if (flags & FLAG_PA_IS_EXEC) {
unsigned long addr = (unsigned long)kaddr;
if (icache_is_vipt_aliasing())
flush_icache_alias(page_to_pfn(page), uaddr, len);
else
__cpuc_coherent_kern_range(addr, addr + len);
if (cache_ops_need_broadcast())
smp_call_function(flush_ptrace_access_other,
NULL, 1);
}
}
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr, unsigned long len)
{
unsigned int flags = 0;
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
flags |= FLAG_PA_CORE_IN_MM;
if (vma->vm_flags & VM_EXEC)
flags |= FLAG_PA_IS_EXEC;
__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len)
{
unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;
__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}
/*
* Copy user data from/to a page which is mapped into a different
* process's address space. Really, we want to allow our "user
* space" model to handle this.
*
* Note that this code needs to run on the current CPU.
*/
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *dst, const void *src,
unsigned long len)
{
#ifdef CONFIG_SMP
preempt_disable();
#endif
memcpy(dst, src, len);
flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
preempt_enable();
#endif
}
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
* coherent with the kernels mapping.
*/
if (!folio_test_highmem(folio)) {
__cpuc_flush_dcache_area(folio_address(folio),
folio_size(folio));
} else {
unsigned long i;
if (cache_is_vipt_nonaliasing()) {
for (i = 0; i < folio_nr_pages(folio); i++) {
void *addr = kmap_local_folio(folio,
i * PAGE_SIZE);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_local(addr);
}
} else {
for (i = 0; i < folio_nr_pages(folio); i++) {
void *addr = kmap_high_get(folio_page(folio, i));
if (addr) {
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high(folio_page(folio, i));
}
}
}
}
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
* we only need to do one flush - which would be at the relevant
* userspace colour, which is congruent with page->index.
*/
if (mapping && cache_is_vipt_aliasing())
flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}
static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
struct mm_struct *mm = current->active_mm;
struct vm_area_struct *vma;
pgoff_t pgoff, pgoff_end;
/*
* There are possible user space mappings of this page:
* - VIVT cache: we need to also write back and invalidate all user
* data in the current VM view associated with this page.
* - aliasing VIPT: we only need to find one mapping of this page.
*/
pgoff = folio->index;
pgoff_end = pgoff + folio_nr_pages(folio) - 1;
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
unsigned long start, offset, pfn;
unsigned int nr;
/*
* If this VMA is not in our MM, we can ignore it.
*/
if (vma->vm_mm != mm)
continue;
if (!(vma->vm_flags & VM_MAYSHARE))
continue;
start = vma->vm_start;
pfn = folio_pfn(folio);
nr = folio_nr_pages(folio);
offset = pgoff - vma->vm_pgoff;
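/*
* offset is computed modulo the word size; a wrapped value above -nr
* means the folio starts before this VMA, in which case we trim the
* leading pages off the flush instead of advancing the start address.
*/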
if (offset > -nr) {
pfn -= offset;
nr += offset;
} else {
start += offset * PAGE_SIZE;
}
if (start + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - start) / PAGE_SIZE;
flush_cache_pages(vma, start, pfn, nr);
}
flush_dcache_mmap_unlock(mapping);
}
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
unsigned long pfn;
struct folio *folio;
struct address_space *mapping;
if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
/* only flush non-aliasing VIPT caches for exec mappings */
return;
pfn = pte_pfn(pteval);
if (!pfn_valid(pfn))
return;
folio = page_folio(pfn_to_page(pfn));
if (cache_is_vipt_aliasing())
mapping = folio_flush_mapping(folio);
else
mapping = NULL;
if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
__flush_dcache_folio(mapping, folio);
if (pte_exec(pteval))
__flush_icache_all();
}
#endif
/*
* Ensure cache coherency between kernel mapping and userspace mapping
* of this page.
*
* We have three cases to consider:
* - VIPT non-aliasing cache: fully coherent so nothing required.
* - VIVT: fully aliasing, so we need to handle every alias in our
* current VM view.
* - VIPT aliasing: need to handle one alias in our current VM view.
*
* If we need to handle aliasing:
* If the page only exists in the page cache and there are no user
* space mappings, we can be lazy and remember that we may have dirty
* kernel cache lines for later. Otherwise, we assume we have
* aliasing mappings.
*
* Note that we disable the lazy flush for SMP configurations where
* the cache maintenance operations are not automatically broadcasted.
*/
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;
/*
* The zero page is never written to, so never has any dirty
* cache lines, and therefore never needs to be flushed.
*/
if (is_zero_pfn(folio_pfn(folio)))
return;
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
if (test_bit(PG_dcache_clean, &folio->flags))
clear_bit(PG_dcache_clean, &folio->flags);
return;
}
mapping = folio_flush_mapping(folio);
if (!cache_ops_need_broadcast() &&
mapping && !folio_mapped(folio))
clear_bit(PG_dcache_clean, &folio->flags);
else {
__flush_dcache_folio(mapping, folio);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, folio);
else if (mapping)
__flush_icache_all();
set_bit(PG_dcache_clean, &folio->flags);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
void flush_dcache_page(struct page *page)
{
flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
/*
* Flush an anonymous page so that users of get_user_pages()
* can safely access the data. The expected sequence is:
*
* get_user_pages()
* -> flush_anon_page
* memcpy() to/from page
* if written to page, flush_dcache_page()
*/
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
unsigned long pfn;
/* VIPT non-aliasing caches need do nothing */
if (cache_is_vipt_nonaliasing())
return;
/*
* Write back and invalidate userspace mapping.
*/
pfn = page_to_pfn(page);
if (cache_is_vivt()) {
flush_cache_page(vma, vmaddr, pfn);
} else {
/*
* For aliasing VIPT, we can flush an alias of the
* userspace address only.
*/
flush_pfn_alias(pfn, vmaddr);
__flush_icache_all();
}
/*
* Invalidate kernel mapping. No data should be contained
* in this mapping of the page. FIXME: this is overkill
* since we actually ask for a write-back and invalidate.
*/
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
| linux-master | arch/arm/mm/flush.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/ptdump.h>
static int ptdump_show(struct seq_file *m, void *v)
{
struct ptdump_info *info = m->private;
ptdump_walk_pgd(m, info);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
}
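/*
* Usage note (illustrative): callers pass a populated ptdump_info and a
* file name; reading the resulting file under /sys/kernel/debug then
* walks and prints the kernel page tables via ptdump_walk_pgd().
*/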
| linux-master | arch/arm/mm/ptdump_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/dma-mapping.c
*
* Copyright (C) 2000-2004 Russell King
*
* DMA uncached mapping support.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <asm/page.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/xen/xen-ops.h>
#include "dma.h"
#include "mm.h"
struct arm_dma_alloc_args {
struct device *dev;
size_t size;
gfp_t gfp;
pgprot_t prot;
const void *caller;
bool want_vaddr;
int coherent_flag;
};
struct arm_dma_free_args {
struct device *dev;
size_t size;
void *cpu_addr;
struct page *page;
bool want_vaddr;
};
#define NORMAL 0
#define COHERENT 1
struct arm_dma_allocator {
void *(*alloc)(struct arm_dma_alloc_args *args,
struct page **ret_page);
void (*free)(struct arm_dma_free_args *args);
};
struct arm_dma_buffer {
struct list_head list;
void *virt;
struct arm_dma_allocator *allocator;
};
static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);
static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
struct arm_dma_buffer *buf, *found = NULL;
unsigned long flags;
spin_lock_irqsave(&arm_dma_bufs_lock, flags);
list_for_each_entry(buf, &arm_dma_bufs, list) {
if (buf->virt == virt) {
list_del(&buf->list);
found = buf;
break;
}
}
spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
return found;
}
/*
* The DMA API is built upon the notion of "buffer ownership". A buffer
* is either exclusively owned by the CPU (and therefore may be accessed
* by it) or exclusively owned by the DMA device. These helper functions
* represent the transitions between these two ownership states.
*
* Note, however, that on later ARMs, this notion does not work due to
* speculative prefetches. We model our approach on the assumption that
* the CPU does do speculative prefetches, which means we clean caches
* before transfers and delay cache invalidation until transfer completion.
*
*/
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
/*
* Ensure that the allocated pages are zeroed, and that any data
* lurking in the kernel direct-mapped region is invalidated.
*/
if (PageHighMem(page)) {
phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
phys_addr_t end = base + size;
while (size > 0) {
void *ptr = kmap_atomic(page);
memset(ptr, 0, PAGE_SIZE);
if (coherent_flag != COHERENT)
dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
page++;
size -= PAGE_SIZE;
}
if (coherent_flag != COHERENT)
outer_flush_range(base, end);
} else {
void *ptr = page_address(page);
memset(ptr, 0, size);
if (coherent_flag != COHERENT) {
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
}
}
}
/*
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
*/
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
gfp_t gfp, int coherent_flag)
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
page = alloc_pages(gfp, order);
if (!page)
return NULL;
/*
* Now split the huge page and free the excess pages
*/
split_page(page, order);
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
__dma_clear_buffer(page, size, coherent_flag);
return page;
}
/*
* Free a DMA buffer. 'size' must be page aligned.
*/
static void __dma_free_buffer(struct page *page, size_t size)
{
struct page *e = page + (size >> PAGE_SHIFT);
while (page < e) {
__free_page(page);
page++;
}
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
int coherent_flag, gfp_t gfp);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr);
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
atomic_pool_size = memparse(p, &p);
return 0;
}
early_param("coherent_pool", early_coherent_pool);
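/*
* Usage note (illustrative): the pool size can be overridden on the
* kernel command line, e.g. "coherent_pool=1M"; memparse() accepts the
* usual K/M/G suffixes.
*/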
/*
* Initialise the coherent pool for atomic allocations.
*/
static int __init atomic_pool_init(void)
{
pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
gfp_t gfp = GFP_KERNEL | GFP_DMA;
struct page *page;
void *ptr;
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
if (!atomic_pool)
goto out;
/*
* The atomic pool is only used for non-coherent allocations
* so we must pass NORMAL for coherent_flag.
*/
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
&page, atomic_pool_init, true, NORMAL,
GFP_KERNEL);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
&page, atomic_pool_init, true);
if (ptr) {
int ret;
ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
page_to_phys(page),
atomic_pool_size, -1);
if (ret)
goto destroy_genpool;
gen_pool_set_algo(atomic_pool,
gen_pool_first_fit_order_align,
NULL);
pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
atomic_pool_size / 1024);
return 0;
}
destroy_genpool:
gen_pool_destroy(atomic_pool);
atomic_pool = NULL;
out:
pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
atomic_pool_size / 1024);
return -ENOMEM;
}
/*
* CMA is activated by core_initcall, so we must be called after it.
*/
postcore_initcall(atomic_pool_init);
#ifdef CONFIG_CMA_AREAS
struct dma_contig_early_reserve {
phys_addr_t base;
unsigned long size;
};
static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
static int dma_mmu_remap_num __initdata;
#ifdef CONFIG_DMA_CMA
void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
dma_mmu_remap[dma_mmu_remap_num].base = base;
dma_mmu_remap[dma_mmu_remap_num].size = size;
dma_mmu_remap_num++;
}
#endif
void __init dma_contiguous_remap(void)
{
int i;
for (i = 0; i < dma_mmu_remap_num; i++) {
phys_addr_t start = dma_mmu_remap[i].base;
phys_addr_t end = start + dma_mmu_remap[i].size;
struct map_desc map;
unsigned long addr;
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
continue;
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_DMA_READY;
/*
* Clear previous low-memory mapping to ensure that the
* TLB does not see any conflicting entries, then flush
* the TLB of the old entries before creating new mappings.
*
* This ensures that any speculatively loaded TLB entries
* (even though they may be rare) can not cause any problems,
* and ensures that this code is architecturally compliant.
*/
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
flush_tlb_kernel_range(__phys_to_virt(start),
__phys_to_virt(end));
iotable_init(&map, 1);
}
}
#endif
static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
struct page *page = virt_to_page((void *)addr);
pgprot_t prot = *(pgprot_t *)data;
set_pte_ext(pte, mk_pte(page, prot), 0);
return 0;
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
unsigned long start = (unsigned long) page_address(page);
unsigned long end = start + size;
apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr)
{
struct page *page;
void *ptr = NULL;
/*
* __alloc_remap_buffer is only called when the device is
* non-coherent
*/
page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
if (!page)
return NULL;
if (!want_vaddr)
goto out;
ptr = dma_common_contiguous_remap(page, size, prot, caller);
if (!ptr) {
__dma_free_buffer(page, size);
return NULL;
}
out:
*ret_page = page;
return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
unsigned long val;
void *ptr = NULL;
if (!atomic_pool) {
WARN(1, "coherent pool not initialised!\n");
return NULL;
}
val = gen_pool_alloc(atomic_pool, size);
if (val) {
phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
*ret_page = phys_to_page(phys);
ptr = (void *)val;
}
return ptr;
}
static bool __in_atomic_pool(void *start, size_t size)
{
return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}
static int __free_from_pool(void *start, size_t size)
{
if (!__in_atomic_pool(start, size))
return 0;
gen_pool_free(atomic_pool, (unsigned long)start, size);
return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
int coherent_flag, gfp_t gfp)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
void *ptr = NULL;
page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
if (!page)
return NULL;
__dma_clear_buffer(page, size, coherent_flag);
if (!want_vaddr)
goto out;
if (PageHighMem(page)) {
ptr = dma_common_contiguous_remap(page, size, prot, caller);
if (!ptr) {
dma_release_from_contiguous(dev, page, count);
return NULL;
}
} else {
__dma_remap(page, size, prot);
ptr = page_address(page);
}
out:
*ret_page = page;
return ptr;
}
static void __free_from_contiguous(struct device *dev, struct page *page,
void *cpu_addr, size_t size, bool want_vaddr)
{
if (want_vaddr) {
if (PageHighMem(page))
dma_common_free_remap(cpu_addr, size);
else
__dma_remap(page, size, PAGE_KERNEL);
}
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
pgprot_writecombine(prot) :
pgprot_dmacoherent(prot);
return prot;
}
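/*
 * In other words: DMA_ATTR_WRITE_COMBINE requests a bufferable
 * write-combining mapping (useful for e.g. frame buffers), while the
 * default path uses the DMA-coherent (non-cacheable) memory type.
 */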
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
struct page **ret_page)
{
struct page *page;
/* __alloc_simple_buffer is only called when the device is coherent */
page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
if (!page)
return NULL;
*ret_page = page;
return page_address(page);
}
static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_simple_buffer(args->dev, args->size, args->gfp,
ret_page);
}
static void simple_allocator_free(struct arm_dma_free_args *args)
{
__dma_free_buffer(args->page, args->size);
}
static struct arm_dma_allocator simple_allocator = {
.alloc = simple_allocator_alloc,
.free = simple_allocator_free,
};
static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
args->want_vaddr, args->coherent_flag,
args->gfp);
}
static void cma_allocator_free(struct arm_dma_free_args *args)
{
__free_from_contiguous(args->dev, args->page, args->cpu_addr,
args->size, args->want_vaddr);
}
static struct arm_dma_allocator cma_allocator = {
.alloc = cma_allocator_alloc,
.free = cma_allocator_free,
};
static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_from_pool(args->size, ret_page);
}
static void pool_allocator_free(struct arm_dma_free_args *args)
{
__free_from_pool(args->cpu_addr, args->size);
}
static struct arm_dma_allocator pool_allocator = {
.alloc = pool_allocator_alloc,
.free = pool_allocator_free,
};
static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_remap_buffer(args->dev, args->size, args->gfp,
args->prot, ret_page, args->caller,
args->want_vaddr);
}
static void remap_allocator_free(struct arm_dma_free_args *args)
{
if (args->want_vaddr)
dma_common_free_remap(args->cpu_addr, args->size);
__dma_free_buffer(args->page, args->size);
}
static struct arm_dma_allocator remap_allocator = {
.alloc = remap_allocator_alloc,
.free = remap_allocator_free,
};
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, pgprot_t prot, bool is_coherent,
unsigned long attrs, const void *caller)
{
u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
struct page *page = NULL;
void *addr;
bool allowblock, cma;
struct arm_dma_buffer *buf;
struct arm_dma_alloc_args args = {
.dev = dev,
.size = PAGE_ALIGN(size),
.gfp = gfp,
.prot = prot,
.caller = caller,
.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
.coherent_flag = is_coherent ? COHERENT : NORMAL,
};
#ifdef CONFIG_DMA_API_DEBUG
u64 limit = (mask + 1) & ~mask;
if (limit && size >= limit) {
dev_warn(dev, "coherent allocation too big (requested %#zx mask %#llx)\n",
size, mask);
return NULL;
}
#endif
buf = kzalloc(sizeof(*buf),
gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
if (!buf)
return NULL;
if (mask < 0xffffffffULL)
gfp |= GFP_DMA;
args.gfp = gfp;
*handle = DMA_MAPPING_ERROR;
allowblock = gfpflags_allow_blocking(gfp);
cma = allowblock ? dev_get_cma_area(dev) : NULL;
if (cma)
buf->allocator = &cma_allocator;
else if (is_coherent)
buf->allocator = &simple_allocator;
else if (allowblock)
buf->allocator = &remap_allocator;
else
buf->allocator = &pool_allocator;
addr = buf->allocator->alloc(&args, &page);
if (page) {
unsigned long flags;
*handle = phys_to_dma(dev, page_to_phys(page));
buf->virt = args.want_vaddr ? addr : page;
spin_lock_irqsave(&arm_dma_bufs_lock, flags);
list_add(&buf->list, &arm_dma_bufs);
spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
} else {
kfree(buf);
}
return args.want_vaddr ? addr : page;
}
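/*
 * Summary of the allocator selection above, in evaluation order:
 *
 *   blocking gfp and a CMA area  -> cma_allocator
 *   coherent device              -> simple_allocator
 *   blocking gfp, no CMA area    -> remap_allocator
 *   non-blocking (atomic) gfp    -> pool_allocator
 */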
/*
* Free a buffer as defined by the above mapping.
*/
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs,
bool is_coherent)
{
struct page *page = phys_to_page(dma_to_phys(dev, handle));
struct arm_dma_buffer *buf;
struct arm_dma_free_args args = {
.dev = dev,
.size = PAGE_ALIGN(size),
.cpu_addr = cpu_addr,
.page = page,
.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
};
buf = arm_dma_buffer_find(cpu_addr);
if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
return;
buf->allocator->free(&args);
kfree(buf);
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
{
unsigned long pfn;
size_t left = size;
pfn = page_to_pfn(page) + offset / PAGE_SIZE;
offset %= PAGE_SIZE;
/*
* A single sg entry may refer to multiple physically contiguous
* pages. But we still need to process highmem pages individually.
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
do {
size_t len = left;
void *vaddr;
page = pfn_to_page(pfn);
if (PageHighMem(page)) {
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
if (cache_is_vipt_nonaliasing()) {
vaddr = kmap_atomic(page);
op(vaddr + offset, len, dir);
kunmap_atomic(vaddr);
} else {
vaddr = kmap_high_get(page);
if (vaddr) {
op(vaddr + offset, len, dir);
kunmap_high(page);
}
}
} else {
vaddr = page_address(page) + offset;
op(vaddr, len, dir);
}
offset = 0;
pfn++;
left -= len;
} while (left);
}
/*
* Make an area consistent for devices.
* Note: Drivers should NOT use this function directly.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
phys_addr_t paddr;
dma_cache_maint_page(page, off, size, dir, dmac_map_area);
paddr = page_to_phys(page) + off;
if (dir == DMA_FROM_DEVICE) {
outer_inv_range(paddr, paddr + size);
} else {
outer_clean_range(paddr, paddr + size);
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
phys_addr_t paddr = page_to_phys(page) + off;
/* FIXME: non-speculating: not required */
/* in any case, don't bother invalidating if DMA to device */
if (dir != DMA_TO_DEVICE) {
outer_inv_range(paddr, paddr + size);
dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
/*
* Mark the D-cache clean for these pages to avoid extra flushing.
*/
if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
size_t offset = offset_in_folio(folio, paddr);
for (;;) {
size_t sz = folio_size(folio) - offset;
if (size < sz)
break;
if (!offset)
set_bit(PG_dcache_clean, &folio->flags);
offset = 0;
size -= sz;
if (!size)
break;
folio = folio_next(folio);
}
}
}
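/*
 * Note on the loop above: a folio is marked PG_dcache_clean only when
 * the sync covers it completely (offset == 0 and the remaining size
 * reaches folio_size()); partially covered folios stay unmarked so a
 * later flush_dcache_folio() still does its work.
 */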
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
int prot = 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
case DMA_TO_DEVICE:
return prot | IOMMU_READ;
case DMA_FROM_DEVICE:
return prot | IOMMU_WRITE;
default:
return prot;
}
}
/* IOMMU */
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
size_t size)
{
unsigned int order = get_order(size);
unsigned int align = 0;
unsigned int count, start;
size_t mapping_size = mapping->bits << PAGE_SHIFT;
unsigned long flags;
dma_addr_t iova;
int i;
if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
count = PAGE_ALIGN(size) >> PAGE_SHIFT;
align = (1 << order) - 1;
spin_lock_irqsave(&mapping->lock, flags);
for (i = 0; i < mapping->nr_bitmaps; i++) {
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
mapping->bits, 0, count, align);
if (start > mapping->bits)
continue;
bitmap_set(mapping->bitmaps[i], start, count);
break;
}
/*
* No unused range found. Try to extend the existing mapping
* and perform a second attempt to reserve an IO virtual
* address range of size bytes.
*/
if (i == mapping->nr_bitmaps) {
if (extend_iommu_mapping(mapping)) {
spin_unlock_irqrestore(&mapping->lock, flags);
return DMA_MAPPING_ERROR;
}
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
mapping->bits, 0, count, align);
if (start > mapping->bits) {
spin_unlock_irqrestore(&mapping->lock, flags);
return DMA_MAPPING_ERROR;
}
bitmap_set(mapping->bitmaps[i], start, count);
}
spin_unlock_irqrestore(&mapping->lock, flags);
iova = mapping->base + (mapping_size * i);
iova += start << PAGE_SHIFT;
return iova;
}
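/*
 * Illustration, assuming 4 KiB pages: a 20 KiB request gives count = 5
 * and order = 3, so the search wants 5 consecutive clear bits starting
 * on an 8-bit boundary; the resulting iova is mapping->base plus
 * (bitmap index * mapping_size) plus (start << PAGE_SHIFT).
 */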
static inline void __free_iova(struct dma_iommu_mapping *mapping,
dma_addr_t addr, size_t size)
{
unsigned int start, count;
size_t mapping_size = mapping->bits << PAGE_SHIFT;
unsigned long flags;
dma_addr_t bitmap_base;
u32 bitmap_index;
if (!size)
return;
bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
bitmap_base = mapping->base + mapping_size * bitmap_index;
start = (addr - bitmap_base) >> PAGE_SHIFT;
if (addr + size > bitmap_base + mapping_size) {
/*
* The address range to be freed reaches into the iova
* range of the next bitmap. This should not happen as
* we don't allow this in __alloc_iova (at the
* moment).
*/
BUG();
} else
count = size >> PAGE_SHIFT;
spin_lock_irqsave(&mapping->lock, flags);
bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
spin_unlock_irqrestore(&mapping->lock, flags);
}
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };
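/*
 * Illustration, assuming 4 KiB pages: the orders above correspond to
 * 2 MiB (order 9), 1 MiB (order 8), 64 KiB (order 4) and 4 KiB
 * (order 0) chunks. A hypothetical 3 MiB allocation proceeds as:
 *
 *   count = 768: take one order-9 chunk (512 pages), 256 pages left;
 *   __fls(256) < 9, so drop to order 8: take one 256-page chunk, done.
 */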
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
gfp_t gfp, unsigned long attrs,
int coherent_flag)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
int array_size = count * sizeof(struct page *);
int i = 0;
int order_idx = 0;
if (array_size <= PAGE_SIZE)
pages = kzalloc(array_size, GFP_KERNEL);
else
pages = vzalloc(array_size);
if (!pages)
return NULL;
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
unsigned long order = get_order(size);
struct page *page;
page = dma_alloc_from_contiguous(dev, count, order,
gfp & __GFP_NOWARN);
if (!page)
goto error;
__dma_clear_buffer(page, size, coherent_flag);
for (i = 0; i < count; i++)
pages[i] = page + i;
return pages;
}
/* Go straight to 4K chunks if caller says it's OK. */
if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
order_idx = ARRAY_SIZE(iommu_order_array) - 1;
/*
* IOMMU can map any pages, so highmem can also be used here
*/
gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
while (count) {
int j, order;
order = iommu_order_array[order_idx];
/* Drop down when we get small */
if (__fls(count) < order) {
order_idx++;
continue;
}
if (order) {
/* See if it's easy to allocate a high-order chunk */
pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
/* Go down a notch at first sign of pressure */
if (!pages[i]) {
order_idx++;
continue;
}
} else {
pages[i] = alloc_pages(gfp, 0);
if (!pages[i])
goto error;
}
if (order) {
split_page(pages[i], order);
j = 1 << order;
while (--j)
pages[i + j] = pages[i] + j;
}
__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
i += 1 << order;
count -= 1 << order;
}
return pages;
error:
while (i--)
if (pages[i])
__free_pages(pages[i], 0);
kvfree(pages);
return NULL;
}
static int __iommu_free_buffer(struct device *dev, struct page **pages,
size_t size, unsigned long attrs)
{
int count = size >> PAGE_SHIFT;
int i;
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
dma_release_from_contiguous(dev, pages[0], count);
} else {
for (i = 0; i < count; i++)
if (pages[i])
__free_pages(pages[i], 0);
}
kvfree(pages);
return 0;
}
/*
* Create a mapping in device IO address space for specified pages
*/
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t dma_addr, iova;
int i;
dma_addr = __alloc_iova(mapping, size);
if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
iova = dma_addr;
for (i = 0; i < count; ) {
int ret;
unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
phys_addr_t phys = page_to_phys(pages[i]);
unsigned int len, j;
for (j = i + 1; j < count; j++, next_pfn++)
if (page_to_pfn(pages[j]) != next_pfn)
break;
len = (j - i) << PAGE_SHIFT;
ret = iommu_map(mapping->domain, iova, phys, len,
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
GFP_KERNEL);
if (ret < 0)
goto fail;
iova += len;
i = j;
}
return dma_addr;
fail:
iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
__free_iova(mapping, dma_addr, size);
return DMA_MAPPING_ERROR;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
/*
* add optional in-page offset from iova to size and align
* result to page size
*/
size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
iova &= PAGE_MASK;
iommu_unmap(mapping->domain, iova, size);
__free_iova(mapping, iova, size);
return 0;
}
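/*
 * Illustration: for iova = 0x10000804 and size = 0x1000, the rounding
 * above yields size = PAGE_ALIGN(0x804 + 0x1000) = 0x2000 and
 * iova = 0x10000000, so both pages touched by the buffer are unmapped.
 */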
static struct page **__atomic_get_pages(void *addr)
{
struct page *page;
phys_addr_t phys;
phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
page = phys_to_page(phys);
return (struct page **)page;
}
static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
return __atomic_get_pages(cpu_addr);
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return cpu_addr;
return dma_common_find_pages(cpu_addr);
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t *handle, int coherent_flag,
unsigned long attrs)
{
struct page *page;
void *addr;
if (coherent_flag == COHERENT)
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else
addr = __alloc_from_pool(size, &page);
if (!addr)
return NULL;
*handle = __iommu_create_mapping(dev, &page, size, attrs);
if (*handle == DMA_MAPPING_ERROR)
goto err_mapping;
return addr;
err_mapping:
__free_from_pool(addr, size);
return NULL;
}
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
dma_addr_t handle, size_t size, int coherent_flag)
{
__iommu_remove_mapping(dev, handle, size);
if (coherent_flag == COHERENT)
__dma_free_buffer(virt_to_page(cpu_addr), size);
else
__free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;
void *addr = NULL;
int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
*handle = DMA_MAPPING_ERROR;
size = PAGE_ALIGN(size);
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
return __iommu_alloc_simple(dev, size, gfp, handle,
coherent_flag, attrs);
pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
if (!pages)
return NULL;
*handle = __iommu_create_mapping(dev, pages, size, attrs);
if (*handle == DMA_MAPPING_ERROR)
goto err_buffer;
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return pages;
addr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!addr)
goto err_mapping;
return addr;
err_mapping:
__iommu_remove_mapping(dev, *handle, size);
err_buffer:
__iommu_free_buffer(dev, pages, size, attrs);
return NULL;
}
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
int err;
if (!pages)
return -ENXIO;
if (vma->vm_pgoff >= nr_pages)
return -ENXIO;
if (!dev->dma_coherent)
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
err = vm_map_pages(vma, pages, nr_pages);
if (err)
pr_err("Remapping memory failed: %d\n", err);
return err;
}
/*
* Free a buffer as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs)
{
int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
struct page **pages;
size = PAGE_ALIGN(size);
if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
return;
}
pages = __iommu_get_pages(cpu_addr, attrs);
if (!pages) {
WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
return;
}
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
dma_common_free_remap(cpu_addr, size);
__iommu_remove_mapping(dev, handle, size);
__iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
if (!pages)
return -ENXIO;
return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
GFP_KERNEL);
}
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova, iova_base;
int ret = 0;
unsigned int count;
struct scatterlist *s;
int prot;
size = PAGE_ALIGN(size);
*handle = DMA_MAPPING_ERROR;
iova_base = iova = __alloc_iova(mapping, size);
if (iova == DMA_MAPPING_ERROR)
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, iova, phys, len, prot,
GFP_KERNEL);
if (ret < 0)
goto fail;
count += len >> PAGE_SHIFT;
iova += len;
}
*handle = iova_base;
return 0;
fail:
iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
__free_iova(mapping, iova_base, size);
return ret;
}
/**
* arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* The scatter gather list elements are merged together (if possible) and
* tagged with the appropriate dma address and length. They are obtained via
* sg_dma_{address,length}.
*/
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s = sg, *dma = sg, *start = sg;
int i, count = 0, ret;
unsigned int offset = s->offset;
unsigned int size = s->offset + s->length;
unsigned int max = dma_get_max_seg_size(dev);
for (i = 1; i < nents; i++) {
s = sg_next(s);
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
ret = __map_sg_chunk(dev, start, size,
&dma->dma_address, dir, attrs);
if (ret < 0)
goto bad_mapping;
dma->dma_address += offset;
dma->dma_length = size - offset;
size = offset = s->offset;
start = s;
dma = sg_next(dma);
count += 1;
}
size += s->length;
}
ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
if (ret < 0)
goto bad_mapping;
dma->dma_address += offset;
dma->dma_length = size - offset;
return count+1;
bad_mapping:
for_each_sg(sg, s, count, i)
__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
if (ret == -ENOMEM)
return ret;
return -EINVAL;
}
/**
* arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer
* @sg: list of buffers
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
static void arm_iommu_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
if (sg_dma_len(s))
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
}
/**
* arm_iommu_sync_sg_for_cpu
* @dev: valid struct device pointer
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
static void arm_iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
if (dev->dma_coherent)
return;
for_each_sg(sg, s, nents, i)
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}
/**
* arm_iommu_sync_sg_for_device
* @dev: valid struct device pointer
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
static void arm_iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
if (dev->dma_coherent)
return;
for_each_sg(sg, s, nents, i)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
* arm_iommu_map_page
* @dev: valid struct device pointer
* @page: page that buffer resides in
* @offset: offset into page for start of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* IOMMU aware version of arm_dma_map_page()
*/
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t dma_addr;
int ret, prot, len = PAGE_ALIGN(size + offset);
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_page_cpu_to_dev(page, offset, size, dir);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
prot, GFP_KERNEL);
if (ret < 0)
goto fail;
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
return DMA_MAPPING_ERROR;
}
/**
* arm_iommu_unmap_page
* @dev: valid struct device pointer
* @handle: DMA address of buffer
* @size: size of buffer (same as passed to dma_map_page)
* @dir: DMA transfer direction (same as passed to dma_map_page)
*
* IOMMU aware version of arm_dma_unmap_page()
*/
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page;
int offset = handle & ~PAGE_MASK;
int len = PAGE_ALIGN(size + offset);
if (!iova)
return;
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
}
iommu_unmap(mapping->domain, iova, len);
__free_iova(mapping, iova, len);
}
/**
* arm_iommu_map_resource - map a device resource for DMA
* @dev: valid struct device pointer
* @phys_addr: physical address of resource
* @size: size of resource to map
* @dir: DMA transfer direction
*/
static dma_addr_t arm_iommu_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t dma_addr;
int ret, prot;
phys_addr_t addr = phys_addr & PAGE_MASK;
unsigned int offset = phys_addr & ~PAGE_MASK;
size_t len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
if (ret < 0)
goto fail;
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
return DMA_MAPPING_ERROR;
}
/**
* arm_iommu_unmap_resource - unmap a device DMA resource
* @dev: valid struct device pointer
* @dma_handle: DMA address to resource
* @size: size of resource to map
* @dir: DMA transfer direction
*/
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = dma_handle & PAGE_MASK;
unsigned int offset = dma_handle & ~PAGE_MASK;
size_t len = PAGE_ALIGN(size + offset);
if (!iova)
return;
iommu_unmap(mapping->domain, iova, len);
__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
if (dev->dma_coherent || !iova)
return;
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
}
static void arm_iommu_sync_single_for_device(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
if (dev->dma_coherent || !iova)
return;
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_cpu_to_dev(page, offset, size, dir);
}
static const struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,
.map_page = arm_iommu_map_page,
.unmap_page = arm_iommu_unmap_page,
.sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
.sync_single_for_device = arm_iommu_sync_single_for_device,
.map_sg = arm_iommu_map_sg,
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
};
/**
* arm_iommu_create_mapping
* @bus: pointer to the bus holding the client device (for IOMMU calls)
* @base: start address of the valid IO address space
* @size: maximum size of the valid IO address space
*
* Creates a mapping structure which holds information about used/unused
* IO address ranges, which is required to perform memory allocation and
* mapping with IOMMU aware functions.
*
* The client device need to be attached to the mapping with
* arm_iommu_attach_device function.
*/
struct dma_iommu_mapping *
arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
{
unsigned int bits = size >> PAGE_SHIFT;
unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
struct dma_iommu_mapping *mapping;
int extensions = 1;
int err = -ENOMEM;
/* currently only 32-bit DMA address space is supported */
if (size > DMA_BIT_MASK(32) + 1)
return ERR_PTR(-ERANGE);
if (!bitmap_size)
return ERR_PTR(-EINVAL);
if (bitmap_size > PAGE_SIZE) {
extensions = bitmap_size / PAGE_SIZE;
bitmap_size = PAGE_SIZE;
}
mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
if (!mapping)
goto err;
mapping->bitmap_size = bitmap_size;
mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
GFP_KERNEL);
if (!mapping->bitmaps)
goto err2;
mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
if (!mapping->bitmaps[0])
goto err3;
mapping->nr_bitmaps = 1;
mapping->extensions = extensions;
mapping->base = base;
mapping->bits = BITS_PER_BYTE * bitmap_size;
spin_lock_init(&mapping->lock);
mapping->domain = iommu_domain_alloc(bus);
if (!mapping->domain)
goto err4;
kref_init(&mapping->kref);
return mapping;
err4:
kfree(mapping->bitmaps[0]);
err3:
kfree(mapping->bitmaps);
err2:
kfree(mapping);
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
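/*
 * A hypothetical attach sequence (driver-side sketch; the window base
 * and size are assumptions, not taken from this file). For a 128 MiB
 * window, bits = 32768 and bitmap_size is exactly one 4 KiB page, so
 * no bitmap extensions are needed:
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping))
 *		arm_iommu_release_mapping(mapping);
 */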
static void release_iommu_mapping(struct kref *kref)
{
int i;
struct dma_iommu_mapping *mapping =
container_of(kref, struct dma_iommu_mapping, kref);
iommu_domain_free(mapping->domain);
for (i = 0; i < mapping->nr_bitmaps; i++)
kfree(mapping->bitmaps[i]);
kfree(mapping->bitmaps);
kfree(mapping);
}
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
int next_bitmap;
if (mapping->nr_bitmaps >= mapping->extensions)
return -EINVAL;
next_bitmap = mapping->nr_bitmaps;
mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
GFP_ATOMIC);
if (!mapping->bitmaps[next_bitmap])
return -ENOMEM;
mapping->nr_bitmaps++;
return 0;
}
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
if (mapping)
kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
static int __arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
int err;
err = iommu_attach_device(mapping->domain, dev);
if (err)
return err;
kref_get(&mapping->kref);
to_dma_iommu_mapping(dev) = mapping;
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
}
/**
* arm_iommu_attach_device
* @dev: valid struct device pointer
* @mapping: io address space mapping structure (returned from
* arm_iommu_create_mapping)
*
* Attaches specified io address space mapping to the provided device.
* This replaces the dma operations (dma_map_ops pointer) with the
* IOMMU aware version.
*
* More than one client might be attached to the same io address space
* mapping.
*/
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
int err;
err = __arm_iommu_attach_device(dev, mapping);
if (err)
return err;
set_dma_ops(dev, &iommu_ops);
return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
/**
* arm_iommu_detach_device
* @dev: valid struct device pointer
*
* Detaches the provided device from a previously attached map.
* This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
*/
void arm_iommu_detach_device(struct device *dev)
{
struct dma_iommu_mapping *mapping;
mapping = to_dma_iommu_mapping(dev);
if (!mapping) {
dev_warn(dev, "Not attached\n");
return;
}
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
to_dma_iommu_mapping(dev) = NULL;
set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
struct dma_iommu_mapping *mapping;
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
size, dev_name(dev));
return;
}
if (__arm_iommu_attach_device(dev, mapping)) {
pr_warn("Failed to attached device %s to IOMMU_mapping\n",
dev_name(dev));
arm_iommu_release_mapping(mapping);
return;
}
set_dma_ops(dev, &iommu_ops);
}
static void arm_teardown_iommu_dma_ops(struct device *dev)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
if (!mapping)
return;
arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}
#else
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
}
static void arm_teardown_iommu_dma_ops(struct device *dev) { }
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
/*
* Due to legacy code that sets the ->dma_coherent flag from a bus
* notifier we can't just assign coherent to the ->dma_coherent flag
* here, but instead have to make sure we only set but never clear it
* for now.
*/
if (coherent)
dev->dma_coherent = true;
/*
* Don't override the dma_ops if they have already been set. Ideally
* this should be the only location where dma_ops are set, remove this
* check when all other callers of set_dma_ops will have disappeared.
*/
if (dev->dma_ops)
return;
if (iommu)
arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
xen_setup_dma_ops(dev);
dev->archdata.dma_ops_setup = true;
}
void arch_teardown_dma_ops(struct device *dev)
{
if (!dev->archdata.dma_ops_setup)
return;
arm_teardown_iommu_dma_ops(dev);
/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
set_dma_ops(dev, NULL);
}
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
}
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
return __dma_alloc(dev, size, dma_handle, gfp,
__get_dma_pgprot(attrs, PAGE_KERNEL), false,
attrs, __builtin_return_address(0));
}
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, unsigned long attrs)
{
__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
| linux-master | arch/arm/mm/dma-mapping.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/arm/mm/mmap.c
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>
#define COLOUR_ALIGN(addr,pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
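/*
 * Worked example, assuming SHMLBA == 16 KiB and 4 KiB pages:
 * COLOUR_ALIGN(0x5000, 3) rounds 0x5000 up to 0x8000 and adds
 * (3 << PAGE_SHIFT) & (SHMLBA - 1) = 0x3000, returning 0xb000, so
 * page 3 of the object lands on the same cache colour in every
 * address space.
 */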
/*
* We need to ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches. We need to ensure that
* a specific page of an object is always mapped at a multiple of
* SHMLBA bytes.
*
* We unconditionally provide this function for all cases, however
* in the VIVT case, we optimise out the alignment rules.
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if either the I or D
* caches alias.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if either the I or D
* caches alias.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
/* requesting a specific address */
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = FIRST_USER_ADDRESS;
info.high_limit = mm->mmap_base;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (addr & ~PAGE_MASK) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
return addr;
}
/*
* You really shouldn't be using read() or write() on /dev/mem. This
* might go away in the future.
*/
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
if (addr < PHYS_OFFSET)
return 0;
if (addr + size > __pa(high_memory - 1) + 1)
return 0;
return 1;
}
/*
* Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
| linux-master | arch/arm/mm/mmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/init.c
*
* Copyright (C) 1995-2005 Russell King
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "mm.h"
#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
cr_alignment = cr_alignment & ~mask;
return cr_alignment;
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
pr_warn("ATAG_INITRD is deprecated; "
"please update your bootloader.\n");
phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
phys_initrd_size = tag->u.initrd.size;
return 0;
}
__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
phys_initrd_start = tag->u.initrd.start;
phys_initrd_size = tag->u.initrd.size;
return 0;
}
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif
static void __init find_limits(unsigned long *min, unsigned long *max_low,
unsigned long *max_high)
{
*max_low = PFN_DOWN(memblock_get_current_limit());
*min = PFN_UP(memblock_start_of_DRAM());
*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
#ifdef CONFIG_ZONE_DMA
phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);
/*
* The DMA mask corresponding to the maximum bus address allocatable
* using GFP_DMA. The default here places no restriction on DMA
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
#ifdef CONFIG_ZONE_DMA
max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
free_area_init(max_zone_pfn);
}
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = __pfn_to_phys(pfn);
unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;
if (__phys_to_pfn(addr) != pfn)
return 0;
/*
* If the address is less than pageblock_size bytes away from a
* present memory chunk, there will still be a memory map entry for
* it, because we round the freed memory map to pageblock boundaries.
*/
if (memblock_overlaps_region(&memblock.memory,
ALIGN_DOWN(addr, pageblock_size),
pageblock_size))
return 1;
return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif
static bool arm_memblock_steal_permitted = true;
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
phys_addr_t phys;
BUG_ON(!arm_memblock_steal_permitted);
phys = memblock_phys_alloc(size, align);
if (!phys)
panic("Failed to steal %pa bytes at %pS\n",
&size, (void *)_RET_IP_);
memblock_phys_free(phys, size);
memblock_remove(phys, size);
return phys;
}
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
u32 size, ctr;
asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
size = 1 << ((ctr & 0xf) + 2);
if (cpuid != 0 && icache_size != size)
pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
cpuid);
if (icache_size > size)
icache_size = size;
}
#endif
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
/* Register the kernel text, kernel data and initrd with memblock. */
memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);
reserve_initrd_mem();
arm_mm_memblock_reserve();
/* reserve any platform specific memblock areas */
if (mdesc->reserve)
mdesc->reserve();
early_init_fdt_scan_reserved_mem();
/* reserve memory for DMA contiguous allocations */
dma_contiguous_reserve(arm_dma_limit);
arm_memblock_steal_permitted = false;
memblock_dump_all();
}
void __init bootmem_init(void)
{
memblock_allow_resize();
find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);
early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
(phys_addr_t)max_low_pfn << PAGE_SHIFT);
/*
* sparse_init() tries to allocate memory from memblock, so must be
* done after the fixed reservations
*/
sparse_init();
/*
* Now free the memory - free_area_init needs
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
/*
* Poison init memory with an undefined instruction (ARM) or a branch to an
* undefined instruction (Thumb).
*/
static inline void poison_init_mem(void *s, size_t count)
{
u32 *p = (u32 *)s;
for (; count != 0; count -= 4)
*p++ = 0xe7fddef0;
}
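/*
 * The poison value is chosen so that 0xe7fddef0 decodes as a
 * permanently undefined ARM instruction, while its Thumb halfwords
 * decode as an undefined instruction (0xdef0) and a short backward
 * branch (0xe7fd).
 */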
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long max_low = max_low_pfn;
phys_addr_t range_start, range_end;
u64 i;
/* set highmem page free */
for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
&range_start, &range_end, NULL) {
unsigned long start = PFN_UP(range_start);
unsigned long end = PFN_DOWN(range_end);
/* Ignore complete lowmem entries */
if (end <= max_low)
continue;
/* Truncate partial highmem entries */
if (start < max_low)
start = max_low;
for (; start < end; start++)
free_highmem_page(pfn_to_page(start));
}
#endif
}
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
* claimed their memory after the kernel image.
*/
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
#endif
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
/* this will put all unused low memory onto the freelists */
memblock_free_all();
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif
free_highpages();
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
*/
#ifdef CONFIG_MMU
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif
#ifdef CONFIG_HIGHMEM
BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}
#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
const char *name;
unsigned long start;
unsigned long end;
pmdval_t mask;
pmdval_t prot;
pmdval_t clear;
};
/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];
static struct section_perm nx_perms[] = {
/* Make page tables, etc. before _stext RW (set NX). */
{
.name = "pre-text NX",
.start = PAGE_OFFSET,
.end = (unsigned long)_stext,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
/* Make init RW (set NX). */
{
.name = "init NX",
.start = (unsigned long)__init_begin,
.end = (unsigned long)_sdata,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
/* Make rodata NX (set RO in ro_perms below). */
{
.name = "rodata NX",
.start = (unsigned long)__start_rodata_section_aligned,
.end = (unsigned long)__init_begin,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
};
static struct section_perm ro_perms[] = {
/* Make kernel code and rodata RX (set RO). */
{
.name = "text/rodata RO",
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
.mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
.prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.clear = PMD_SECT_AP_WRITE,
#endif
},
};
/*
* Updates section permissions only for the current mm (sections are
* copied into each mm). During startup, this is the init_mm. It is
* only safe to call this with preemption disabled, as under
* stop_machine().
*/
static inline void section_update(unsigned long addr, pmdval_t mask,
pmdval_t prot, struct mm_struct *mm)
{
pmd_t *pmd;
pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
if (addr & SECTION_SIZE)
pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
else
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
flush_pmd_entry(pmd);
local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
if (cpu_architecture() < CPU_ARCH_ARMv6)
return false;
return !!(get_cr() & CR_XP);
}
static void set_section_perms(struct section_perm *perms, int n, bool set,
struct mm_struct *mm)
{
size_t i;
unsigned long addr;
if (!arch_has_strict_perms())
return;
for (i = 0; i < n; i++) {
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
perms[i].name, perms[i].start, perms[i].end,
SECTION_SIZE);
continue;
}
for (addr = perms[i].start;
addr < perms[i].end;
addr += SECTION_SIZE)
section_update(addr, perms[i].mask,
set ? perms[i].prot : perms[i].clear, mm);
}
}
/**
* update_sections_early() is intended to be called only through the
* stop_machine framework and executed by only one CPU while all other
* CPUs spin and wait, so no locking is required in this function.
*/
static void update_sections_early(struct section_perm perms[], int n)
{
struct task_struct *t, *s;
for_each_process(t) {
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
if (s->mm)
set_section_perms(perms, n, true, s->mm);
}
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}
static int __fix_kernmem_perms(void *unused)
{
update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
return 0;
}
static void fix_kernmem_perms(void)
{
stop_machine(__fix_kernmem_perms, NULL, NULL);
}
static int __mark_rodata_ro(void *unused)
{
update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
return 0;
}
void mark_rodata_ro(void)
{
stop_machine(__mark_rodata_ro, NULL, NULL);
debug_checkwx();
}
#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */
void free_initmem(void)
{
fix_kernmem_perms();
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (start == initrd_start)
start = round_down(start, PAGE_SIZE);
if (end == initrd_end)
end = round_up(end, PAGE_SIZE);
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
| linux-master | arch/arm/mm/init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/copypage-v6.c
*
* Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include "mm.h"
#if SHMLBA > 16384
#error FIX ME
#endif
static DEFINE_RAW_SPINLOCK(v6_lock);
/*
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
static void v6_copy_user_highpage_nonaliasing(struct page *to,
struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kfrom = kmap_atomic(from);
kto = kmap_atomic(to);
copy_page(kto, kfrom);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
}
/*
* Clear the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of this page.
*/
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
void *kaddr = kmap_atomic(page);
clear_page(kaddr);
kunmap_atomic(kaddr);
}
/*
* Discard data in the kernel mapping for the new page.
* FIXME: this needs MCRR to be supported.
*/
static void discard_old_kernel_data(void *kto)
{
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
"r" ((unsigned long)kto + PAGE_SIZE - 1)
: "cc");
}
/*
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_highpage_aliasing(struct page *to,
struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
struct folio *src = page_folio(from);
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
if (!test_and_set_bit(PG_dcache_clean, &src->flags))
__flush_dcache_folio(folio_flush_mapping(src), src);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(to));
/*
* Now copy the page using the same cache colour as the
* page's ultimate destination.
*/
raw_spin_lock(&v6_lock);
kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
copy_page((void *)kto, (void *)kfrom);
raw_spin_unlock(&v6_lock);
}
/*
* Clear the user page. We need to deal with the aliasing issues,
* so remap the kernel page into the same cache colour as the user
* page.
*/
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(page));
/*
* Now clear the page using the same cache colour as
* the page's ultimate destination.
*/
raw_spin_lock(&v6_lock);
set_top_pte(to, mk_pte(page, PAGE_KERNEL));
clear_page((void *)to);
raw_spin_unlock(&v6_lock);
}
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};
static int __init v6_userpage_init(void)
{
if (cache_is_vipt_aliasing()) {
cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
}
return 0;
}
core_initcall(v6_userpage_init);
| linux-master | arch/arm/mm/copypage-v6.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/arm/mm/extable.c
*/
#include <linux/extable.h>
#include <linux/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup) {
regs->ARM_pc = fixup->fixup;
#ifdef CONFIG_THUMB2_KERNEL
/* Clear the IT state to avoid nasty surprises in the fixup */
regs->ARM_cpsr &= ~PSR_IT_MASK;
#endif
}
return fixup != NULL;
}
| linux-master | arch/arm/mm/extable.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/dma.h>
#include "mm.h"
static inline bool __virt_addr_valid(unsigned long x)
{
/*
* high_memory does not get immediately defined, and there
* are early callers of __pa() against PAGE_OFFSET
*/
if (!high_memory && x >= PAGE_OFFSET)
return true;
if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
return true;
/*
* MAX_DMA_ADDRESS is a virtual address that may not correspond to an
* actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS)
* that we just need to work around it and always return true.
*/
if (x == MAX_DMA_ADDRESS)
return true;
return false;
}
phys_addr_t __virt_to_phys(unsigned long x)
{
WARN(!__virt_addr_valid(x),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x, (void *)x);
return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
/* This is bounds checking against the kernel image only.
* __pa_symbol should only be used on kernel symbol addresses.
*/
VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START ||
x > (unsigned long)KERNEL_END);
return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
| linux-master | arch/arm/mm/physaddr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on linux/arch/arm/mm/dma-mapping.c
*
* Copyright (C) 2000-2004 Russell King
*/
#include <linux/dma-map-ops.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>
#include "dma.h"
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
dmac_map_area(__va(paddr), size, dir);
if (dir == DMA_FROM_DEVICE)
outer_inv_range(paddr, paddr + size);
else
outer_clean_range(paddr, paddr + size);
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (dir != DMA_TO_DEVICE) {
outer_inv_range(paddr, paddr + size);
dmac_unmap_area(__va(paddr), size, dir);
}
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
if (IS_ENABLED(CONFIG_CPU_V7M)) {
/*
* Cache support for v7m is optional, so can be treated as
* coherent if no cache has been detected. Note that it is not
* enough to check whether the MPU is in use or not, since in the absence
* of an MPU the system memory map is used.
*/
dev->dma_coherent = cacheid ? coherent : true;
} else {
/*
* Assume coherent DMA in case MMU/MPU has not been set up.
*/
dev->dma_coherent = (get_cr() & CR_M) ? coherent : true;
}
}
| linux-master | arch/arm/mm/dma-mapping-nommu.c |
// SPDX-License-Identifier: GPL-2.0
static struct fsr_info fsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "unknown 2" },
{ do_bad, SIGBUS, 0, "unknown 3" },
{ do_bad, SIGBUS, 0, "reserved translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "reserved access flag fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
{ do_bad, SIGBUS, 0, "reserved permission fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
{ do_bad, SIGBUS, 0, "asynchronous external abort" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
{ do_bad, SIGBUS, 0, "asynchronous parity error" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "unknown 32" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGBUS, 0, "unknown 35" },
{ do_bad, SIGBUS, 0, "unknown 36" },
{ do_bad, SIGBUS, 0, "unknown 37" },
{ do_bad, SIGBUS, 0, "unknown 38" },
{ do_bad, SIGBUS, 0, "unknown 39" },
{ do_bad, SIGBUS, 0, "unknown 40" },
{ do_bad, SIGBUS, 0, "unknown 41" },
{ do_bad, SIGBUS, 0, "unknown 42" },
{ do_bad, SIGBUS, 0, "unknown 43" },
{ do_bad, SIGBUS, 0, "unknown 44" },
{ do_bad, SIGBUS, 0, "unknown 45" },
{ do_bad, SIGBUS, 0, "unknown 46" },
{ do_bad, SIGBUS, 0, "unknown 47" },
{ do_bad, SIGBUS, 0, "unknown 48" },
{ do_bad, SIGBUS, 0, "unknown 49" },
{ do_bad, SIGBUS, 0, "unknown 50" },
{ do_bad, SIGBUS, 0, "unknown 51" },
{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
{ do_bad, SIGBUS, 0, "unknown 53" },
{ do_bad, SIGBUS, 0, "unknown 54" },
{ do_bad, SIGBUS, 0, "unknown 55" },
{ do_bad, SIGBUS, 0, "unknown 56" },
{ do_bad, SIGBUS, 0, "unknown 57" },
{ do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
{ do_bad, SIGBUS, 0, "unknown 59" },
{ do_bad, SIGBUS, 0, "unknown 60" },
{ do_bad, SIGBUS, 0, "unknown 61" },
{ do_bad, SIGBUS, 0, "unknown 62" },
{ do_bad, SIGBUS, 0, "unknown 63" },
};
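/*
 * With LPAE, the long-descriptor fault status encoding is shared
 * between data aborts (DFSR) and prefetch aborts (IFSR), so the same
 * table serves both.
 */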
#define ifsr_info fsr_info
| linux-master | arch/arm/mm/fsr-3level.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/copypage-feroceon.c
*
* Copyright (C) 2008 Marvell Semiconductors
*
* This handles copy_user_highpage and clear_user_page on Feroceon
* more optimally than the generic implementations.
*/
#include <linux/init.h>
#include <linux/highmem.h>
static void feroceon_copy_user_page(void *kto, const void *kfrom)
{
int tmp;
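/*
 * Each ldmia/stmia pair below moves eight registers (32 bytes, one
 * Feroceon D-cache line); the mcr cleans and invalidates the
 * destination line just written, and each loop iteration retires
 * 32 * 8 = 256 bytes.
 */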
asm volatile ("\
.arch armv5te \n\
1: ldmia %1!, {r2 - r7, ip, lr} \n\
pld [%1, #0] \n\
pld [%1, #32] \n\
pld [%1, #64] \n\
pld [%1, #96] \n\
pld [%1, #128] \n\
pld [%1, #160] \n\
pld [%1, #192] \n\
stmia %0, {r2 - r7, ip, lr} \n\
ldmia %1!, {r2 - r7, ip, lr} \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
stmia %0, {r2 - r7, ip, lr} \n\
ldmia %1!, {r2 - r7, ip, lr} \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
stmia %0, {r2 - r7, ip, lr} \n\
ldmia %1!, {r2 - r7, ip, lr} \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
stmia %0, {r2 - r7, ip, lr} \n\
ldmia %1!, {r2 - r7, ip, lr} \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
stmia %0, {r2 - r7, ip, lr} \n\
ldmia %1!, {r2 - r7, ip, lr} \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
stmia %0, {r2 - r7, ip, lr} \n\
ldmia %1!, {r2 - r7, ip, lr} \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
stmia %0, {r2 - r7, ip, lr} \n\
ldmia %1!, {r2 - r7, ip, lr} \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
stmia %0, {r2 - r7, ip, lr} \n\
subs %2, %2, #(32 * 8) \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
bne 1b \n\
mcr p15, 0, %2, c7, c10, 4 @ drain WB"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
: "2" (PAGE_SIZE)
: "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
}
void feroceon_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to);
kfrom = kmap_atomic(from);
flush_cache_page(vma, vaddr, page_to_pfn(from));
feroceon_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom);
kunmap_atomic(kto);
}
void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile ("\
mov r1, %2 \n\
mov r2, #0 \n\
mov r3, #0 \n\
mov r4, #0 \n\
mov r5, #0 \n\
mov r6, #0 \n\
mov r7, #0 \n\
mov ip, #0 \n\
mov lr, #0 \n\
1: stmia %0, {r2-r7, ip, lr} \n\
subs r1, r1, #1 \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
bne 1b \n\
mcr p15, 0, r1, c7, c10, 4 @ drain WB"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
kunmap_atomic(kaddr);
}
struct cpu_user_fns feroceon_user_fns __initdata = {
.cpu_clear_user_highpage = feroceon_clear_user_highpage,
.cpu_copy_user_highpage = feroceon_copy_user_highpage,
};
| linux-master | arch/arm/mm/copypage-feroceon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/copypage-xscale.c
*
* Copyright (C) 1995-2005 Russell King
*
* This handles the mini data cache, as found on SA11x0 and XScale
* processors. When we copy a user page, we map it in such a way
* that accesses to this page will not touch the main data cache, but
* will be cached in the mini data cache. This prevents us from thrashing
* the main data cache on page faults.
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include "mm.h"
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
* XScale mini-dcache optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*/
static void mc_copy_user_page(void *from, void *to)
{
int tmp;
/*
* Strangely enough, best performance is achieved
* when prefetching the destination as well. (NP)
*/
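/*
 * Each half of the "2:" block below moves 32 bytes as four ldrd/strd
 * pairs, then cleans and invalidates the completed destination line so
 * the copy does not leave it dirty in the main D-cache; one loop
 * iteration therefore handles 64 bytes.
 */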
asm volatile ("\
.arch xscale \n\
pld [%0, #0] \n\
pld [%0, #32] \n\
pld [%1, #0] \n\
pld [%1, #32] \n\
1: pld [%0, #64] \n\
pld [%0, #96] \n\
pld [%1, #64] \n\
pld [%1, #96] \n\
2: ldrd r2, r3, [%0], #8 \n\
ldrd r4, r5, [%0], #8 \n\
mov ip, %1 \n\
strd r2, r3, [%1], #8 \n\
ldrd r2, r3, [%0], #8 \n\
strd r4, r5, [%1], #8 \n\
ldrd r4, r5, [%0], #8 \n\
strd r2, r3, [%1], #8 \n\
strd r4, r5, [%1], #8 \n\
mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
ldrd r2, r3, [%0], #8 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
ldrd r4, r5, [%0], #8 \n\
mov ip, %1 \n\
strd r2, r3, [%1], #8 \n\
ldrd r2, r3, [%0], #8 \n\
strd r4, r5, [%1], #8 \n\
ldrd r4, r5, [%0], #8 \n\
strd r2, r3, [%1], #8 \n\
strd r4, r5, [%1], #8 \n\
mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
subs %2, %2, #1 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
bgt 1b \n\
beq 2b "
: "+&r" (from), "+&r" (to), "=&r" (tmp)
: "2" (PAGE_SIZE / 64 - 1)
: "r2", "r3", "r4", "r5", "ip");
}
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &src->flags))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto);
}
/*
* XScale optimised clear_user_page
*/
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
.arch xscale \n\
mov r1, %2 \n\
mov r2, #0 \n\
mov r3, #0 \n\
1: mov ip, %0 \n\
strd r2, r3, [%0], #8 \n\
strd r2, r3, [%0], #8 \n\
strd r2, r3, [%0], #8 \n\
strd r2, r3, [%0], #8 \n\
mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
subs r1, r1, #1 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
bne 1b"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "ip");
kunmap_atomic(kaddr);
}
struct cpu_user_fns xscale_mc_user_fns __initdata = {
.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
.cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
};
| linux-master | arch/arm/mm/copypage-xscale.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Broadcom Brahma-B15 CPU read-ahead cache management functions
*
* Copyright (C) 2015-2016 Broadcom
*/
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/of_address.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-b15-rac.h>
extern void v7_flush_kern_cache_all(void);
/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
#define RAC_CONFIG0_REG (0x78)
#define RACENPREF_MASK (0x3)
#define RACPREFINST_SHIFT (0)
#define RACENINST_SHIFT (2)
#define RACPREFDATA_SHIFT (4)
#define RACENDATA_SHIFT (6)
#define RAC_CPU_SHIFT (8)
#define RACCFG_MASK (0xff)
#define RAC_CONFIG1_REG (0x7c)
/* Brahma-B15 is a quad-core only design */
#define B15_RAC_FLUSH_REG (0x80)
/* Brahma-B53 is an octo-core design */
#define B53_RAC_FLUSH_REG (0x84)
#define FLUSH_RAC (1 << 0)
/* Bitmask to enable instruction and data prefetching with a 256-bytes stride */
#define RAC_DATA_INST_EN_MASK (1 << RACPREFINST_SHIFT | \
RACENPREF_MASK << RACENINST_SHIFT | \
1 << RACPREFDATA_SHIFT | \
RACENPREF_MASK << RACENDATA_SHIFT)
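/* With the shift values above, this mask works out to 0xdd per CPU. */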
#define RAC_ENABLED 0
/* Special state where we want to bypass the spinlock and call directly
* into the v7 cache maintenance operations during suspend/resume
*/
#define RAC_SUSPENDED 1
static void __iomem *b15_rac_base;
static DEFINE_SPINLOCK(rac_lock);
static u32 rac_config0_reg;
static u32 rac_flush_offset;
/* Initialization flag to avoid checking for b15_rac_base, and to prevent
* multi-platform kernels from crashing here as well.
*/
static unsigned long b15_rac_flags;
static inline u32 __b15_rac_disable(void)
{
u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
__raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
dmb();
return val;
}
static inline void __b15_rac_flush(void)
{
u32 reg;
__raw_writel(FLUSH_RAC, b15_rac_base + rac_flush_offset);
do {
/* This dmb() is required to force the Bus Interface Unit
* to clean outstanding writes, and forces an idle cycle
* to be inserted.
*/
dmb();
reg = __raw_readl(b15_rac_base + rac_flush_offset);
} while (reg & FLUSH_RAC);
}
static inline u32 b15_rac_disable_and_flush(void)
{
u32 reg;
reg = __b15_rac_disable();
__b15_rac_flush();
return reg;
}
static inline void __b15_rac_enable(u32 val)
{
__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
/* dsb() is required here to be consistent with __flush_icache_all() */
dsb();
}
#define BUILD_RAC_CACHE_OP(name, bar) \
void b15_flush_##name(void) \
{ \
unsigned int do_flush; \
u32 val = 0; \
\
if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) { \
v7_flush_##name(); \
bar; \
return; \
} \
\
spin_lock(&rac_lock); \
do_flush = test_bit(RAC_ENABLED, &b15_rac_flags); \
if (do_flush) \
val = b15_rac_disable_and_flush(); \
v7_flush_##name(); \
if (!do_flush) \
bar; \
else \
__b15_rac_enable(val); \
spin_unlock(&rac_lock); \
}
#define nobarrier
/* The readahead cache present in the Brahma-B15 CPU is a special piece of
* hardware sitting after the integrated L2 cache of the B15 CPU complex,
* whose purpose is to prefetch instructions and/or data with a line size
* of either 64 bytes or 256 bytes. The rationale is that the data-bus of
* the CPU interface is optimized for 256-byte transactions, and enabling
* the readahead cache provides a significant performance boost, so we
* want it enabled (typically twice the performance for a memcpy benchmark
* application).
*
* The readahead cache is transparent for Modified Virtual Addresses
* cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
* DCCIMVAC.
*
* It is however not transparent for the following cache maintenance
* operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU which is precisely
* what we are patching with our BUILD_RAC_CACHE_OP here.
*/
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
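/*
 * Enable instruction and data prefetching for every possible CPU; the
 * per-CPU RACCFG fields live RAC_CPU_SHIFT bits apart in
 * RAC_CONFIG0_REG.
 */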
static void b15_rac_enable(void)
{
unsigned int cpu;
u32 enable = 0;
for_each_possible_cpu(cpu)
enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
b15_rac_disable_and_flush();
__b15_rac_enable(enable);
}
static int b15_rac_reboot_notifier(struct notifier_block *nb,
unsigned long action,
void *data)
{
/* During kexec, we are not yet migrated to the boot CPU, so we need to
* make sure we are SMP safe here. Once the RAC is disabled, flag it as
* suspended such that the hotplug notifier returns early.
*/
if (action == SYS_RESTART) {
spin_lock(&rac_lock);
b15_rac_disable_and_flush();
clear_bit(RAC_ENABLED, &b15_rac_flags);
set_bit(RAC_SUSPENDED, &b15_rac_flags);
spin_unlock(&rac_lock);
}
return NOTIFY_DONE;
}
static struct notifier_block b15_rac_reboot_nb = {
.notifier_call = b15_rac_reboot_notifier,
};
/* The CPU hotplug case is the most interesting one; we basically need to make
* sure that the RAC is disabled for the entire system prior to having a CPU
* die, in particular prior to this dying CPU having exited the coherency
* domain.
*
* Once this CPU is marked dead, we can safely re-enable the RAC for the
* remaining CPUs in the system which are still online.
*
* Offlining a CPU is the problematic case, onlining a CPU is not much of an
* issue since the CPU and its cache-level hierarchy will start filling with
* the RAC disabled, so L1 and L2 only.
*
* In this function, we should NOT have to verify any unsafe setting/condition
* b15_rac_base:
*
* It is protected by the RAC_ENABLED flag, which is clear by default and
* only set once the initialization procedure is done; b15_rac_base has
* been set by that time.
*
* RAC_ENABLED:
* There is a small timing window, in b15_rac_init(), between
* cpuhp_setup_state_*()
* ...
* set RAC_ENABLED
* However, no hotplug activity takes place at that point in the Linux
* boot procedure.
*
* Since we have to disable the RAC for all cores, we keep the RAC on as
* long as possible (disable it as late as possible) to gain the cache
* benefit.
*
* Thus, the dying/dead states are chosen here.
*
* We are choosing not to disable the RAC on a per-CPU basis; if we did,
* we would want to consider disabling it as early as possible to benefit
* the other active CPUs.
*/
/* Running on the dying CPU */
static int b15_rac_dying_cpu(unsigned int cpu)
{
/* During kexec/reboot, the RAC is disabled via the reboot notifier;
* return early here.
*/
if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
return 0;
spin_lock(&rac_lock);
/* Indicate that we are starting a hotplug procedure */
__clear_bit(RAC_ENABLED, &b15_rac_flags);
/* Disable the readahead cache and save its value to a global */
rac_config0_reg = b15_rac_disable_and_flush();
spin_unlock(&rac_lock);
return 0;
}
/* Running on a non-dying CPU */
static int b15_rac_dead_cpu(unsigned int cpu)
{
/* During kexec/reboot, the RAC is disabled via the reboot notifier;
* return early here.
*/
if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
return 0;
spin_lock(&rac_lock);
/* And enable it */
__b15_rac_enable(rac_config0_reg);
__set_bit(RAC_ENABLED, &b15_rac_flags);
spin_unlock(&rac_lock);
return 0;
}
static int b15_rac_suspend(void)
{
/* Suspend the read-ahead cache operations, forcing our cache
* implementation to fall back to the regular ARMv7 calls.
*
* We are guaranteed to be running on the boot CPU at this point and
* with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
* here.
*/
rac_config0_reg = b15_rac_disable_and_flush();
set_bit(RAC_SUSPENDED, &b15_rac_flags);
return 0;
}
static void b15_rac_resume(void)
{
/* Coming out of an S3 suspend/resume cycle, the read-ahead cache
* register RAC_CONFIG0_REG will have been restored to its default value;
* make sure we re-enable it and set the enable flag. We are also
* guaranteed to run on the boot CPU, so this is not racy either.
*/
__b15_rac_enable(rac_config0_reg);
clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}
static struct syscore_ops b15_rac_syscore_ops = {
.suspend = b15_rac_suspend,
.resume = b15_rac_resume,
};
static int __init b15_rac_init(void)
{
struct device_node *dn, *cpu_dn;
int ret = 0, cpu;
u32 reg, en_mask = 0;
dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
if (!dn)
return -ENODEV;
if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
goto out;
b15_rac_base = of_iomap(dn, 0);
if (!b15_rac_base) {
pr_err("failed to remap BIU control base\n");
ret = -ENOMEM;
goto out;
}
cpu_dn = of_get_cpu_node(0, NULL);
if (!cpu_dn) {
ret = -ENODEV;
goto out;
}
if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
rac_flush_offset = B15_RAC_FLUSH_REG;
else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
rac_flush_offset = B53_RAC_FLUSH_REG;
else {
pr_err("Unsupported CPU\n");
of_node_put(cpu_dn);
ret = -EINVAL;
goto out;
}
of_node_put(cpu_dn);
ret = register_reboot_notifier(&b15_rac_reboot_nb);
if (ret) {
pr_err("failed to register reboot notifier\n");
iounmap(b15_rac_base);
goto out;
}
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
"arm/cache-b15-rac:dead",
NULL, b15_rac_dead_cpu);
if (ret)
goto out_unmap;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
"arm/cache-b15-rac:dying",
NULL, b15_rac_dying_cpu);
if (ret)
goto out_cpu_dead;
}
if (IS_ENABLED(CONFIG_PM_SLEEP))
register_syscore_ops(&b15_rac_syscore_ops);
spin_lock(&rac_lock);
reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
for_each_possible_cpu(cpu)
en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");
b15_rac_enable();
set_bit(RAC_ENABLED, &b15_rac_flags);
spin_unlock(&rac_lock);
pr_info("%pOF: Broadcom Brahma-B15 readahead cache\n", dn);
goto out;
out_cpu_dead:
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
out_unmap:
unregister_reboot_notifier(&b15_rac_reboot_nb);
iounmap(b15_rac_base);
out:
of_node_put(dn);
return ret;
}
arch_initcall(b15_rac_init);
| linux-master | arch/arm/mm/cache-b15-rac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* L220/L310 cache controller support
*
* Copyright (C) 2016 ARM Limited
*/
#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/hardware/cache-l2x0.h>
#define PMU_NR_COUNTERS 2
static void __iomem *l2x0_base;
static struct pmu *l2x0_pmu;
static cpumask_t pmu_cpu;
static const char *l2x0_name;
static ktime_t l2x0_pmu_poll_period;
static struct hrtimer l2x0_pmu_hrtimer;
/*
* The L220/PL310 PMU has two equivalent counters, Counter1 and Counter0.
* Registers controlling these are laid out in pairs, in descending order, i.e.
* the register for Counter1 comes first, followed by the register for
* Counter0.
* We ensure that idx 0 -> Counter0, and idx 1 -> Counter1.
*/
static struct perf_event *events[PMU_NR_COUNTERS];
/* Find an unused counter */
static int l2x0_pmu_find_idx(void)
{
int i;
for (i = 0; i < PMU_NR_COUNTERS; i++) {
if (!events[i])
return i;
}
return -1;
}
/* How many counters are allocated? */
static int l2x0_pmu_num_active_counters(void)
{
int i, cnt = 0;
for (i = 0; i < PMU_NR_COUNTERS; i++) {
if (events[i])
cnt++;
}
return cnt;
}
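/*
 * The counter registers sit 4 bytes apart in descending order, so
 * idx 0 resolves to the Counter0 register and idx 1 to the register
 * 4 bytes below it (Counter1), matching the layout described above.
 */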
static void l2x0_pmu_counter_config_write(int idx, u32 val)
{
writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_CFG - 4 * idx);
}
static u32 l2x0_pmu_counter_read(int idx)
{
return readl_relaxed(l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
}
static void l2x0_pmu_counter_write(int idx, u32 val)
{
writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
}
static void __l2x0_pmu_enable(void)
{
u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
val |= L2X0_EVENT_CNT_CTRL_ENABLE;
writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
}
static void __l2x0_pmu_disable(void)
{
u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
val &= ~L2X0_EVENT_CNT_CTRL_ENABLE;
writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
}
static void l2x0_pmu_enable(struct pmu *pmu)
{
if (l2x0_pmu_num_active_counters() == 0)
return;
__l2x0_pmu_enable();
}
static void l2x0_pmu_disable(struct pmu *pmu)
{
if (l2x0_pmu_num_active_counters() == 0)
return;
__l2x0_pmu_disable();
}
static void warn_if_saturated(u32 count)
{
if (count != 0xffffffff)
return;
pr_warn_ratelimited("L2X0 counter saturated. Poll period too long\n");
}
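/*
 * Fold the 32-bit hardware delta into the 64-bit perf count. The
 * local64_xchg() loop retries if another updater changed prev_count
 * between our read of it and our read of the hardware counter.
 */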
static void l2x0_pmu_event_read(struct perf_event *event)
{
struct hw_perf_event *hw = &event->hw;
u64 prev_count, new_count, mask;
do {
prev_count = local64_read(&hw->prev_count);
new_count = l2x0_pmu_counter_read(hw->idx);
} while (local64_xchg(&hw->prev_count, new_count) != prev_count);
mask = GENMASK_ULL(31, 0);
local64_add((new_count - prev_count) & mask, &event->count);
warn_if_saturated(new_count);
}
static void l2x0_pmu_event_configure(struct perf_event *event)
{
struct hw_perf_event *hw = &event->hw;
/*
* The L2X0 counters saturate at 0xffffffff rather than wrapping, so we
* will *always* lose some number of events when a counter saturates,
* and have no way of detecting how many were lost.
*
* To minimize the impact of this, we try to maximize the period by
* always starting counters at zero. To ensure that group ratios are
* representative, we poll periodically to avoid counters saturating.
* See l2x0_pmu_poll().
*/
local64_set(&hw->prev_count, 0);
l2x0_pmu_counter_write(hw->idx, 0);
}
static enum hrtimer_restart l2x0_pmu_poll(struct hrtimer *hrtimer)
{
unsigned long flags;
int i;
local_irq_save(flags);
__l2x0_pmu_disable();
for (i = 0; i < PMU_NR_COUNTERS; i++) {
struct perf_event *event = events[i];
if (!event)
continue;
l2x0_pmu_event_read(event);
l2x0_pmu_event_configure(event);
}
__l2x0_pmu_enable();
local_irq_restore(flags);
hrtimer_forward_now(hrtimer, l2x0_pmu_poll_period);
return HRTIMER_RESTART;
}
static void __l2x0_pmu_event_enable(int idx, u32 event)
{
u32 val;
val = event << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
l2x0_pmu_counter_config_write(idx, val);
}
static void l2x0_pmu_event_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hw = &event->hw;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
l2x0_pmu_event_configure(event);
}
hw->state = 0;
__l2x0_pmu_event_enable(hw->idx, hw->config_base);
}
static void __l2x0_pmu_event_disable(int idx)
{
u32 val;
val = L2X0_EVENT_CNT_CFG_SRC_DISABLED << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
l2x0_pmu_counter_config_write(idx, val);
}
static void l2x0_pmu_event_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hw = &event->hw;
if (WARN_ON_ONCE(event->hw.state & PERF_HES_STOPPED))
return;
__l2x0_pmu_event_disable(hw->idx);
hw->state |= PERF_HES_STOPPED;
if (flags & PERF_EF_UPDATE) {
l2x0_pmu_event_read(event);
hw->state |= PERF_HES_UPTODATE;
}
}
static int l2x0_pmu_event_add(struct perf_event *event, int flags)
{
struct hw_perf_event *hw = &event->hw;
int idx = l2x0_pmu_find_idx();
if (idx == -1)
return -EAGAIN;
/*
* Pin the timer, so that the overflows are handled by the chosen
* event->cpu (this is the same one as presented in "cpumask"
* attribute).
*/
if (l2x0_pmu_num_active_counters() == 0)
hrtimer_start(&l2x0_pmu_hrtimer, l2x0_pmu_poll_period,
HRTIMER_MODE_REL_PINNED);
events[idx] = event;
hw->idx = idx;
l2x0_pmu_event_configure(event);
hw->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
l2x0_pmu_event_start(event, 0);
return 0;
}
static void l2x0_pmu_event_del(struct perf_event *event, int flags)
{
struct hw_perf_event *hw = &event->hw;
l2x0_pmu_event_stop(event, PERF_EF_UPDATE);
events[hw->idx] = NULL;
hw->idx = -1;
if (l2x0_pmu_num_active_counters() == 0)
hrtimer_cancel(&l2x0_pmu_hrtimer);
}
static bool l2x0_pmu_group_is_valid(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
struct perf_event *leader = event->group_leader;
struct perf_event *sibling;
int num_hw = 0;
if (leader->pmu == pmu)
num_hw++;
else if (!is_software_event(leader))
return false;
for_each_sibling_event(sibling, leader) {
if (sibling->pmu == pmu)
num_hw++;
else if (!is_software_event(sibling))
return false;
}
return num_hw <= PMU_NR_COUNTERS;
}
static int l2x0_pmu_event_init(struct perf_event *event)
{
struct hw_perf_event *hw = &event->hw;
if (event->attr.type != l2x0_pmu->type)
return -ENOENT;
if (is_sampling_event(event) ||
event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
if (event->cpu < 0)
return -EINVAL;
if (event->attr.config & ~L2X0_EVENT_CNT_CFG_SRC_MASK)
return -EINVAL;
hw->config_base = event->attr.config;
if (!l2x0_pmu_group_is_valid(event))
return -EINVAL;
event->cpu = cpumask_first(&pmu_cpu);
return 0;
}
struct l2x0_event_attribute {
struct device_attribute attr;
unsigned int config;
bool pl310_only;
};
#define L2X0_EVENT_ATTR(_name, _config, _pl310_only) \
(&((struct l2x0_event_attribute[]) {{ \
.attr = __ATTR(_name, S_IRUGO, l2x0_pmu_event_show, NULL), \
.config = _config, \
.pl310_only = _pl310_only, \
}})[0].attr.attr)
#define L220_PLUS_EVENT_ATTR(_name, _config) \
L2X0_EVENT_ATTR(_name, _config, false)
#define PL310_EVENT_ATTR(_name, _config) \
L2X0_EVENT_ATTR(_name, _config, true)
static ssize_t l2x0_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct l2x0_event_attribute *lattr;
lattr = container_of(attr, typeof(*lattr), attr);
return snprintf(buf, PAGE_SIZE, "config=0x%x\n", lattr->config);
}
static umode_t l2x0_pmu_event_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct pmu *pmu = dev_get_drvdata(dev);
struct l2x0_event_attribute *lattr;
lattr = container_of(attr, typeof(*lattr), attr.attr);
if (!lattr->pl310_only || strcmp("l2c_310", pmu->name) == 0)
return attr->mode;
return 0;
}
static struct attribute *l2x0_pmu_event_attrs[] = {
L220_PLUS_EVENT_ATTR(co, 0x1),
L220_PLUS_EVENT_ATTR(drhit, 0x2),
L220_PLUS_EVENT_ATTR(drreq, 0x3),
L220_PLUS_EVENT_ATTR(dwhit, 0x4),
L220_PLUS_EVENT_ATTR(dwreq, 0x5),
L220_PLUS_EVENT_ATTR(dwtreq, 0x6),
L220_PLUS_EVENT_ATTR(irhit, 0x7),
L220_PLUS_EVENT_ATTR(irreq, 0x8),
L220_PLUS_EVENT_ATTR(wa, 0x9),
PL310_EVENT_ATTR(ipfalloc, 0xa),
PL310_EVENT_ATTR(epfhit, 0xb),
PL310_EVENT_ATTR(epfalloc, 0xc),
PL310_EVENT_ATTR(srrcvd, 0xd),
PL310_EVENT_ATTR(srconf, 0xe),
PL310_EVENT_ATTR(epfrcvd, 0xf),
NULL
};
static struct attribute_group l2x0_pmu_event_attrs_group = {
.name = "events",
.attrs = l2x0_pmu_event_attrs,
.is_visible = l2x0_pmu_event_attr_is_visible,
};
static ssize_t l2x0_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return cpumap_print_to_pagebuf(true, buf, &pmu_cpu);
}
static struct device_attribute l2x0_pmu_cpumask_attr =
__ATTR(cpumask, S_IRUGO, l2x0_pmu_cpumask_show, NULL);
static struct attribute *l2x0_pmu_cpumask_attrs[] = {
&l2x0_pmu_cpumask_attr.attr,
NULL,
};
static struct attribute_group l2x0_pmu_cpumask_attr_group = {
.attrs = l2x0_pmu_cpumask_attrs,
};
static const struct attribute_group *l2x0_pmu_attr_groups[] = {
&l2x0_pmu_event_attrs_group,
&l2x0_pmu_cpumask_attr_group,
NULL,
};
static void l2x0_pmu_reset(void)
{
int i;
__l2x0_pmu_disable();
for (i = 0; i < PMU_NR_COUNTERS; i++)
__l2x0_pmu_event_disable(i);
}
static int l2x0_pmu_offline_cpu(unsigned int cpu)
{
unsigned int target;
if (!cpumask_test_and_clear_cpu(cpu, &pmu_cpu))
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(l2x0_pmu, cpu, target);
cpumask_set_cpu(target, &pmu_cpu);
return 0;
}
void l2x0_pmu_suspend(void)
{
int i;
if (!l2x0_pmu)
return;
l2x0_pmu_disable(l2x0_pmu);
for (i = 0; i < PMU_NR_COUNTERS; i++) {
if (events[i])
l2x0_pmu_event_stop(events[i], PERF_EF_UPDATE);
}
}
void l2x0_pmu_resume(void)
{
int i;
if (!l2x0_pmu)
return;
l2x0_pmu_reset();
for (i = 0; i < PMU_NR_COUNTERS; i++) {
if (events[i])
l2x0_pmu_event_start(events[i], PERF_EF_RELOAD);
}
l2x0_pmu_enable(l2x0_pmu);
}
void __init l2x0_pmu_register(void __iomem *base, u32 part)
{
/*
* Determine whether we support the PMU, and choose the name for sysfs.
* This is also used by l2x0_pmu_event_attr_is_visible to determine
* which events to display, as the PL310 PMU supports a superset of
* L220 events.
*
* The L210 PMU has a different programmer's interface, and is not
* supported by this driver.
*
* We must defer registering the PMU until the perf subsystem is up and
* running, so just stash the name and base, and leave that to another
* initcall.
*/
switch (part & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L220:
l2x0_name = "l2c_220";
break;
case L2X0_CACHE_ID_PART_L310:
l2x0_name = "l2c_310";
break;
default:
return;
}
l2x0_base = base;
}
static __init int l2x0_pmu_init(void)
{
int ret;
if (!l2x0_base)
return 0;
l2x0_pmu = kzalloc(sizeof(*l2x0_pmu), GFP_KERNEL);
if (!l2x0_pmu) {
pr_warn("Unable to allocate L2x0 PMU\n");
return -ENOMEM;
}
*l2x0_pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
.pmu_enable = l2x0_pmu_enable,
.pmu_disable = l2x0_pmu_disable,
.read = l2x0_pmu_event_read,
.start = l2x0_pmu_event_start,
.stop = l2x0_pmu_event_stop,
.add = l2x0_pmu_event_add,
.del = l2x0_pmu_event_del,
.event_init = l2x0_pmu_event_init,
.attr_groups = l2x0_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
l2x0_pmu_reset();
/*
* We always use a hrtimer rather than an interrupt.
* See comments in l2x0_pmu_event_configure and l2x0_pmu_poll.
*
* Polling once a second allows the counters to fill up to 1/128th on a
* quad-core test chip with cores clocked at 400MHz. Hopefully this
* leaves sufficient headroom to avoid overflow on production silicon
* at higher frequencies.
*/
l2x0_pmu_poll_period = ms_to_ktime(1000);
hrtimer_init(&l2x0_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
l2x0_pmu_hrtimer.function = l2x0_pmu_poll;
cpumask_set_cpu(0, &pmu_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
"perf/arm/l2x0:online", NULL,
l2x0_pmu_offline_cpu);
if (ret)
goto out_pmu;
ret = perf_pmu_register(l2x0_pmu, l2x0_name, -1);
if (ret)
goto out_cpuhp;
return 0;
out_cpuhp:
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE);
out_pmu:
kfree(l2x0_pmu);
l2x0_pmu = NULL;
return ret;
}
device_initcall(l2x0_pmu_init);
| linux-master | arch/arm/mm/cache-l2x0-pmu.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/proc-fns.h>
#include <asm/spectre.h>
#include <asm/system_misc.h>
#ifdef CONFIG_ARM_PSCI
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
struct arm_smccc_res res;
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
return SPECTRE_MITIGATED;
case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
return SPECTRE_UNAFFECTED;
default:
return SPECTRE_VULNERABLE;
}
}
#else
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
return SPECTRE_VULNERABLE;
}
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
static void harden_branch_predictor_bpiall(void)
{
write_sysreg(0, BPIALL);
}
static void harden_branch_predictor_iciallu(void)
{
write_sysreg(0, ICIALLU);
}
static void __maybe_unused call_smc_arch_workaround_1(void)
{
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
const char *spectre_v2_method = NULL;
int cpu = smp_processor_id();
if (per_cpu(harden_branch_predictor_fn, cpu))
return SPECTRE_MITIGATED;
switch (method) {
case SPECTRE_V2_METHOD_BPIALL:
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_bpiall;
spectre_v2_method = "BPIALL";
break;
case SPECTRE_V2_METHOD_ICIALLU:
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_iciallu;
spectre_v2_method = "ICIALLU";
break;
case SPECTRE_V2_METHOD_HVC:
per_cpu(harden_branch_predictor_fn, cpu) =
call_hvc_arch_workaround_1;
cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
spectre_v2_method = "hypervisor";
break;
case SPECTRE_V2_METHOD_SMC:
per_cpu(harden_branch_predictor_fn, cpu) =
call_smc_arch_workaround_1;
cpu_do_switch_mm = cpu_v7_smc_switch_mm;
spectre_v2_method = "firmware";
break;
}
if (spectre_v2_method)
pr_info("CPU%u: Spectre v2: using %s workaround\n",
smp_processor_id(), spectre_v2_method);
return SPECTRE_MITIGATED;
}
#else
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
pr_info_once("Spectre V2: workarounds disabled by configuration\n");
return SPECTRE_VULNERABLE;
}
#endif
static void cpu_v7_spectre_v2_init(void)
{
unsigned int state, method = 0;
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A8:
case ARM_CPU_PART_CORTEX_A9:
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A17:
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
state = SPECTRE_MITIGATED;
method = SPECTRE_V2_METHOD_BPIALL;
break;
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
state = SPECTRE_MITIGATED;
method = SPECTRE_V2_METHOD_ICIALLU;
break;
case ARM_CPU_PART_BRAHMA_B53:
/* Requires no workaround */
state = SPECTRE_UNAFFECTED;
break;
default:
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
state = SPECTRE_UNAFFECTED;
break;
}
fallthrough;
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72:
state = spectre_v2_get_cpu_fw_mitigation_state();
if (state != SPECTRE_MITIGATED)
break;
switch (arm_smccc_1_1_get_conduit()) {
case SMCCC_CONDUIT_HVC:
method = SPECTRE_V2_METHOD_HVC;
break;
case SMCCC_CONDUIT_SMC:
method = SPECTRE_V2_METHOD_SMC;
break;
default:
state = SPECTRE_VULNERABLE;
break;
}
}
if (state == SPECTRE_MITIGATED)
state = spectre_v2_install_workaround(method);
spectre_v2_update_state(state, method);
}
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
static int spectre_bhb_method;
static const char *spectre_bhb_method_name(int method)
{
switch (method) {
case SPECTRE_V2_METHOD_LOOP8:
return "loop";
case SPECTRE_V2_METHOD_BPIALL:
return "BPIALL";
default:
return "unknown";
}
}
static int spectre_bhb_install_workaround(int method)
{
if (spectre_bhb_method != method) {
if (spectre_bhb_method) {
pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
smp_processor_id());
return SPECTRE_VULNERABLE;
}
if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
return SPECTRE_VULNERABLE;
spectre_bhb_method = method;
pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
smp_processor_id(), spectre_bhb_method_name(method));
}
return SPECTRE_MITIGATED;
}
#else
static int spectre_bhb_install_workaround(int method)
{
return SPECTRE_VULNERABLE;
}
#endif
static void cpu_v7_spectre_bhb_init(void)
{
unsigned int state, method = 0;
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72:
state = SPECTRE_MITIGATED;
method = SPECTRE_V2_METHOD_LOOP8;
break;
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
state = SPECTRE_MITIGATED;
method = SPECTRE_V2_METHOD_BPIALL;
break;
default:
state = SPECTRE_UNAFFECTED;
break;
}
if (state == SPECTRE_MITIGATED)
state = spectre_bhb_install_workaround(method);
spectre_v2_update_state(state, method);
}
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
u32 mask, const char *msg)
{
u32 aux_cr;
asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
if ((aux_cr & mask) != mask) {
if (!*warned)
pr_err("CPU%u: %s", smp_processor_id(), msg);
*warned = true;
return false;
}
return true;
}
static DEFINE_PER_CPU(bool, spectre_warned);
static bool check_spectre_auxcr(bool *warned, u32 bit)
{
return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
cpu_v7_check_auxcr_set(warned, bit,
"Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
}
void cpu_v7_ca8_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
cpu_v7_spectre_v2_init();
}
void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_v2_init();
cpu_v7_spectre_bhb_init();
}
void cpu_v7_bugs_init(void)
{
cpu_v7_spectre_v2_init();
cpu_v7_spectre_bhb_init();
}
| linux-master | arch/arm/mm/proc-v7-bugs.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/hwcap.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/system_info.h>
/*
* Note: accesses outside of the kernel image and the identity map area
* are not supported on any CPU using the idmap tables as its current
* page tables.
*/
pgd_t *idmap_pgd __ro_after_init;
long long arch_phys_to_idmap_offset __ro_after_init;
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
{
pmd_t *pmd;
unsigned long next;
if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
pmd = pmd_alloc_one(&init_mm, addr);
if (!pmd) {
pr_warn("Failed to allocate identity pmd.\n");
return;
}
/*
* Copy the original PMD to ensure that the PMD entries for
* the kernel image are preserved.
*/
if (!pud_none(*pud))
memcpy(pmd, pmd_offset(pud, 0),
PTRS_PER_PMD * sizeof(pmd_t));
pud_populate(&init_mm, pud, pmd);
pmd += pmd_index(addr);
} else
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
*pmd = __pmd((addr & PMD_MASK) | prot);
flush_pmd_entry(pmd);
} while (pmd++, addr = next, addr != end);
}
#else /* !CONFIG_ARM_LPAE */
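/*
 * On classic (2-level) ARM, a Linux PMD covers 2MB and is backed by
 * two consecutive 1MB hardware section entries, hence the
 * pmd[0]/pmd[1] pair below.
 */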
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
{
pmd_t *pmd = pmd_offset(pud, addr);
addr = (addr & PMD_MASK) | prot;
pmd[0] = __pmd(addr);
addr += SECTION_SIZE;
pmd[1] = __pmd(addr);
flush_pmd_entry(pmd);
}
#endif /* CONFIG_ARM_LPAE */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
unsigned long prot)
{
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
idmap_add_pmd(pud, addr, next, prot);
} while (pud++, addr = next, addr != end);
}
static void identity_mapping_add(pgd_t *pgd, const char *text_start,
const char *text_end, unsigned long prot)
{
unsigned long addr, end;
unsigned long next;
addr = virt_to_idmap(text_start);
end = virt_to_idmap(text_end);
pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale_family())
prot |= PMD_BIT4;
pgd += pgd_index(addr);
do {
next = pgd_addr_end(addr, end);
idmap_add_pud(pgd, addr, next, prot);
} while (pgd++, addr = next, addr != end);
}
extern char __idmap_text_start[], __idmap_text_end[];
static int __init init_static_idmap(void)
{
idmap_pgd = pgd_alloc(&init_mm);
if (!idmap_pgd)
return -ENOMEM;
identity_mapping_add(idmap_pgd, __idmap_text_start,
__idmap_text_end, 0);
/* Flush L1 for the hardware to see this page table content */
if (!(elf_hwcap & HWCAP_LPAE))
flush_cache_louis();
return 0;
}
early_initcall(init_static_idmap);
/*
* In order to soft-boot, we need to switch to a 1:1 mapping for the
* cpu_reset functions. This will then ensure that we have predictable
* results when turning off the mmu.
*/
void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
local_flush_bp_all();
#ifdef CONFIG_CPU_HAS_ASID
/*
* We don't have a clean ASID for the identity mapping, which
* may clash with virtual addresses of the previous page tables
* and may therefore still be live in the TLB.
*/
local_flush_tlb_all();
#endif
}
| linux-master | arch/arm/mm/idmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/ioremap.c
*
* Re-map IO memory to kernel address space so that we can access it.
*
* (C) Copyright 1995 1996 Linus Torvalds
*
* Hacked for ARM by Phil Blundell <[email protected]>
* Hacked to allow all architectures to build, and various cleanups
* by Russell King
*
* This allows a driver to remap an arbitrary region of bus memory into
* virtual space. One should *only* use readl, writel, memcpy_toio and
* so on with such remapped areas.
*
* Because the ARM only has a 32-bit address space we can't address the
* whole of the (physical) PCI space at once. PCI huge-mode addressing
* allows us to circumvent this restriction by splitting PCI space into
* two 2GB chunks and mapping only one at a time into processor memory.
* We use MMU protection domains to trap any attempt to access the bank
* that is not currently mapped. (This isn't fully implemented yet.)
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"
LIST_HEAD(static_vmlist);
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
size_t size, unsigned int mtype)
{
struct static_vm *svm;
struct vm_struct *vm;
list_for_each_entry(svm, &static_vmlist, list) {
vm = &svm->vm;
if (!(vm->flags & VM_ARM_STATIC_MAPPING))
continue;
if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
continue;
if (vm->phys_addr > paddr ||
paddr + size - 1 > vm->phys_addr + vm->size - 1)
continue;
return svm;
}
return NULL;
}
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
struct static_vm *svm;
struct vm_struct *vm;
list_for_each_entry(svm, &static_vmlist, list) {
vm = &svm->vm;
/* static_vmlist is ascending order */
if (vm->addr > vaddr)
break;
if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
return svm;
}
return NULL;
}
void __init add_static_vm_early(struct static_vm *svm)
{
struct static_vm *curr_svm;
struct vm_struct *vm;
void *vaddr;
vm = &svm->vm;
vm_area_add_early(vm);
vaddr = vm->addr;
list_for_each_entry(curr_svm, &static_vmlist, list) {
vm = &curr_svm->vm;
if (vm->addr > vaddr)
break;
}
list_add_tail(&svm->list, &curr_svm->list);
}
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
__pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
void __check_vmalloc_seq(struct mm_struct *mm)
{
int seq;
do {
seq = atomic_read(&init_mm.context.vmalloc_seq);
memcpy(pgd_offset(mm, VMALLOC_START),
pgd_offset_k(VMALLOC_START),
sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
pgd_index(VMALLOC_START)));
/*
* Use a store-release so that other CPUs that observe the
* counter's new value are guaranteed to see the results of the
* memcpy as well.
*/
atomic_set_release(&mm->context.vmalloc_seq, seq);
} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Section support is unsafe on SMP - If you iounmap and ioremap a region,
* the other CPUs will not see this change until their next context switch.
* Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
* which requires the new ioremap'd region to be referenced, the CPU will
* reference the _old_ region.
*
* Note that get_vm_area_caller() allocates a guard 4K page, so we need to
* mask the size back to 1MB aligned or we will overflow in the loop below.
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
pmd_t *pmdp = pmd_off_k(addr);
do {
pmd_t pmd = *pmdp;
if (!pmd_none(pmd)) {
/*
* Clear the PMD from the page table, and
* increment the vmalloc sequence so others
* notice this change.
*
* Note: this is still racy on SMP machines.
*/
pmd_clear(pmdp);
atomic_inc_return_release(&init_mm.context.vmalloc_seq);
/*
* Free the page table, if there was one.
*/
if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
}
addr += PMD_SIZE;
pmdp += 2;
} while (addr < end);
/*
* Ensure that the active_mm is up to date - we want to
* catch any use-after-iounmap cases.
*/
check_vmalloc_seq(current->active_mm);
flush_tlb_kernel_range(virt, end);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
* sync the current kernel mapping.
*/
unmap_area_sections(virt, size);
do {
pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
flush_pmd_entry(pmd);
addr += PMD_SIZE;
pmd += 2;
} while (addr < end);
return 0;
}
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
* sync the current kernel mapping.
*/
unmap_area_sections(virt, size);
do {
unsigned long super_pmd_val, i;
super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
PMD_SECT_SUPER;
super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
for (i = 0; i < 8; i++) {
pmd[0] = __pmd(super_pmd_val);
pmd[1] = __pmd(super_pmd_val);
flush_pmd_entry(pmd);
addr += PMD_SIZE;
pmd += 2;
}
pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
} while (addr < end);
return 0;
}
#endif
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
int err;
unsigned long addr;
struct vm_struct *area;
phys_addr_t paddr = __pfn_to_phys(pfn);
#ifndef CONFIG_ARM_LPAE
/*
* High mappings must be supersection aligned
*/
if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
return NULL;
#endif
type = get_mem_type(mtype);
if (!type)
return NULL;
/*
* Page align the mapping size, taking account of any offset.
*/
size = PAGE_ALIGN(offset + size);
/*
* Try to reuse one of the static mappings whenever possible.
*/
if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
struct static_vm *svm;
svm = find_static_vm_paddr(paddr, size, mtype);
if (svm) {
addr = (unsigned long)svm->vm.addr;
addr += paddr - svm->vm.phys_addr;
return (void __iomem *) (offset + addr);
}
}
/*
* Don't allow RAM to be mapped with mismatched attributes - this
* causes problems with ARMv6+
*/
if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
mtype != MT_MEMORY_RW))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
area->phys_addr = paddr;
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
if (DOMAIN_IO == 0 &&
(((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
cpu_is_xsc3()) && pfn >= 0x100000 &&
!((paddr | size | addr) & ~SUPERSECTION_MASK)) {
area->flags |= VM_ARM_SECTION_MAPPING;
err = remap_area_supersections(addr, pfn, size, type);
} else if (!((paddr | size | addr) & ~PMD_MASK)) {
area->flags |= VM_ARM_SECTION_MAPPING;
err = remap_area_sections(addr, pfn, size, type);
} else
#endif
err = ioremap_page_range(addr, addr + size, paddr,
__pgprot(type->prot_pte));
if (err) {
vunmap((void *)addr);
return NULL;
}
flush_cache_vmap(addr, addr + size);
return (void __iomem *) (offset + addr);
}
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
phys_addr_t last_addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
unsigned long pfn = __phys_to_pfn(phys_addr);
/*
* Don't allow wraparound or zero size
*/
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
caller);
}
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
unsigned int mtype)
{
return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *) =
__arm_ioremap_caller;
void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space as memory. Needed when the kernel wants to execute
* code in external memory. This is needed for reprogramming source
* clocks that would affect normal memory for example. Please see
* CONFIG_GENERIC_ALLOCATOR for allocating external memory.
*/
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
unsigned int mtype;
if (cached)
mtype = MT_MEMORY_RWX;
else
mtype = MT_MEMORY_RWX_NONCACHED;
return __arm_ioremap_caller(phys_addr, size, mtype,
__builtin_return_address(0));
}
void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (__force void *)arch_ioremap_caller(phys_addr, size,
MT_MEMORY_RW,
__builtin_return_address(0));
}
void iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
struct static_vm *svm;
/* If this is a static mapping, we must leave it alone */
svm = find_static_vm_vaddr(addr);
if (svm)
return;
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
{
struct vm_struct *vm;
vm = find_vm_area(addr);
/*
* If this is a section based mapping we need to handle it
* specially as the VM subsystem does not know how to handle
* such a beast.
*/
if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
unmap_area_sections((unsigned long)vm->addr, vm->size);
}
#endif
vunmap(addr);
}
EXPORT_SYMBOL(iounmap);
#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;
void pci_ioremap_set_mem_type(int mem_type)
{
pci_ioremap_mem_type = mem_type;
}
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
if (!(res->flags & IORESOURCE_IO))
return -EINVAL;
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
/*
* Must be called after early_fixmap_init
*/
void __init early_ioremap_init(void)
{
early_ioremap_setup();
}
bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
unsigned long flags)
{
unsigned long pfn = PHYS_PFN(offset);
return memblock_is_map_memory(pfn);
}
| linux-master | arch/arm/mm/ioremap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/proc-syms.c
*
* Copyright (C) 2000-2002 Russell King
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
#endif
#else
EXPORT_SYMBOL(processor);
#endif
#ifndef MULTI_CACHE
EXPORT_SYMBOL(__cpuc_flush_kern_all);
EXPORT_SYMBOL(__cpuc_flush_user_all);
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
EXPORT_SYMBOL(__cpuc_flush_dcache_area);
#else
EXPORT_SYMBOL(cpu_cache);
#endif
#ifdef CONFIG_MMU
#ifndef MULTI_USER
EXPORT_SYMBOL(__cpu_clear_user_highpage);
EXPORT_SYMBOL(__cpu_copy_user_highpage);
#else
EXPORT_SYMBOL(cpu_user);
#endif
#endif
/*
* No module should need to touch the TLB (and currently
* no modules do). We export this for "loadkernel" support
* (booting a new kernel from within a running kernel).
*/
#ifdef MULTI_TLB
EXPORT_SYMBOL(cpu_tlb);
#endif
| linux-master | arch/arm/mm/proc-syms.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Debug helper to dump the current kernel pagetables of the system
* so that we can see what the various memory ranges are set to.
*
* Derived from x86 implementation:
* (C) Copyright 2008 Intel Corporation
*
* Author: Arjan van de Ven <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/ptdump.h>
static struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
{ KASAN_SHADOW_START, "Kasan shadow start"},
{ KASAN_SHADOW_END, "Kasan shadow end"},
#endif
{ MODULES_VADDR, "Modules" },
{ PAGE_OFFSET, "Kernel Mapping" },
{ 0, "vmalloc() Area" },
{ FDT_FIXED_BASE, "FDT Area" },
{ FIXADDR_START, "Fixmap Area" },
{ VECTORS_BASE, "Vectors" },
{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
{ -1, NULL },
};
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
if (m) \
seq_printf(m, fmt, ##args); \
})
#define pt_dump_seq_puts(m, fmt) \
({ \
if (m) \
seq_printf(m, fmt); \
})
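/*
 * Editor's note: passing a NULL seq_file through these wrappers silently
 * discards the output; ptdump_check_wx() below relies on this to walk the
 * page tables for W+X checking without printing anything.
 */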
struct pg_state {
struct seq_file *seq;
const struct addr_marker *marker;
unsigned long start_address;
unsigned level;
u64 current_prot;
bool check_wx;
unsigned long wx_pages;
const char *current_domain;
};
struct prot_bits {
u64 mask;
u64 val;
const char *set;
const char *clear;
bool ro_bit;
bool nx_bit;
};
static const struct prot_bits pte_bits[] = {
{
.mask = L_PTE_USER,
.val = L_PTE_USER,
.set = "USR",
.clear = " ",
}, {
.mask = L_PTE_RDONLY,
.val = L_PTE_RDONLY,
.set = "ro",
.clear = "RW",
.ro_bit = true,
}, {
.mask = L_PTE_XN,
.val = L_PTE_XN,
.set = "NX",
.clear = "x ",
.nx_bit = true,
}, {
.mask = L_PTE_SHARED,
.val = L_PTE_SHARED,
.set = "SHD",
.clear = " ",
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_UNCACHED,
.set = "SO/UNCACHED",
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_BUFFERABLE,
.set = "MEM/BUFFERABLE/WC",
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_WRITETHROUGH,
.set = "MEM/CACHED/WT",
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_WRITEBACK,
.set = "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_MINICACHE,
.set = "MEM/MINICACHE",
#endif
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_WRITEALLOC,
.set = "MEM/CACHED/WBWA",
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_DEV_SHARED,
.set = "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_DEV_NONSHARED,
.set = "DEV/NONSHARED",
#endif
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_DEV_WC,
.set = "DEV/WC",
}, {
.mask = L_PTE_MT_MASK,
.val = L_PTE_MT_DEV_CACHED,
.set = "DEV/CACHED",
},
};
static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
{
.mask = PMD_SECT_USER,
.val = PMD_SECT_USER,
.set = "USR",
}, {
.mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.set = "ro",
.clear = "RW",
.ro_bit = true,
#elif __LINUX_ARM_ARCH__ >= 6
{
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.set = " ro",
.ro_bit = true,
}, {
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
.set = " RW",
}, {
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ,
.set = "USR ro",
}, {
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.set = "USR RW",
#else /* ARMv4/ARMv5 */
/* These are approximate */
{
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = 0,
.set = " ro",
.ro_bit = true,
}, {
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
.set = " RW",
}, {
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ,
.set = "USR ro",
}, {
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.set = "USR RW",
#endif
}, {
.mask = PMD_SECT_XN,
.val = PMD_SECT_XN,
.set = "NX",
.clear = "x ",
.nx_bit = true,
}, {
.mask = PMD_SECT_S,
.val = PMD_SECT_S,
.set = "SHD",
.clear = " ",
},
};
struct pg_level {
const char *name;
const struct prot_bits *bits;
size_t num;
u64 mask;
const struct prot_bits *ro_bit;
const struct prot_bits *nx_bit;
};
static struct pg_level pg_level[] = {
{
}, { /* pgd */
}, { /* p4d */
}, { /* pud */
}, { /* pmd */
.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
.bits = section_bits,
.num = ARRAY_SIZE(section_bits),
}, { /* pte */
.name = "PTE",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
},
};
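/*
 * Editor's note: the pg_level[] index matches the level number passed to
 * note_page() by the walkers below (1 = pgd, 2 = p4d, 3 = pud, 4 = pmd,
 * 5 = pte); only the pmd and pte entries carry decode tables.
 */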
static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
{
unsigned i;
for (i = 0; i < num; i++, bits++) {
const char *s;
if ((st->current_prot & bits->mask) == bits->val)
s = bits->set;
else
s = bits->clear;
if (s)
pt_dump_seq_printf(st->seq, " %s", s);
}
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
if (!st->check_wx)
return;
if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
pg_level[st->level].ro_bit->val)
return;
if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
pg_level[st->level].nx_bit->val)
return;
WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
(void *)st->start_address);
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val, const char *domain)
{
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
if (!st->level) {
st->level = level;
st->current_prot = prot;
st->current_domain = domain;
pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot || level != st->level ||
domain != st->current_domain ||
addr >= st->marker[1].start_address) {
const char *unit = units;
unsigned long delta;
if (st->current_prot) {
note_prot_wx(st, addr);
pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx ",
st->start_address, addr);
delta = (addr - st->start_address) >> 10;
while (!(delta & 1023) && unit[1]) {
delta >>= 10;
unit++;
}
pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
pg_level[st->level].name);
if (st->current_domain)
pt_dump_seq_printf(st->seq, " %s",
st->current_domain);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
pt_dump_seq_printf(st->seq, "\n");
}
if (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
st->current_domain = domain;
st->level = level;
}
}
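/*
 * Editor's sketch (not part of the original file): the delta/unit loop in
 * note_page() pretty-prints a region size by dividing by 1024 while the
 * size remains a whole multiple of 1024. A 2 MiB region, for instance,
 * starts as delta = 2048 (KiB) and ends as "2M". A plain C equivalent:
 */
#if 0	/* illustrative example only */
static void example_print_size(unsigned long bytes)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta = bytes >> 10;	/* start in KiB */

	while (!(delta & 1023) && unit[1]) {	/* divisible by 1024? */
		delta >>= 10;
		unit++;
	}
	pr_info("%9lu%c\n", delta, *unit);
}
#endif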
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
const char *domain)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
unsigned long addr;
unsigned i;
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
addr = start + i * PAGE_SIZE;
note_page(st, addr, 5, pte_val(*pte), domain);
}
}
static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
case PMD_DOMAIN(DOMAIN_KERNEL):
return "KERNEL ";
case PMD_DOMAIN(DOMAIN_USER):
return "USER ";
case PMD_DOMAIN(DOMAIN_IO):
return "IO ";
case PMD_DOMAIN(DOMAIN_VECTORS):
return "VECTORS";
default:
return "unknown";
}
#endif
return NULL;
}
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
pmd_t *pmd = pmd_offset(pud, 0);
unsigned long addr;
unsigned i;
const char *domain;
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
domain = get_domain_name(pmd);
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
note_page(st, addr, 4, pmd_val(*pmd), domain);
else
walk_pte(st, pmd, addr, domain);
if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
addr += SECTION_SIZE;
pmd++;
domain = get_domain_name(pmd);
note_page(st, addr, 4, pmd_val(*pmd), domain);
}
}
}
static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
pud_t *pud = pud_offset(p4d, 0);
unsigned long addr;
unsigned i;
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
if (!pud_none(*pud)) {
walk_pmd(st, pud, addr);
} else {
note_page(st, addr, 3, pud_val(*pud), NULL);
}
}
}
static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
p4d_t *p4d = p4d_offset(pgd, 0);
unsigned long addr;
unsigned i;
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
addr = start + i * P4D_SIZE;
if (!p4d_none(*p4d)) {
walk_pud(st, p4d, addr);
} else {
note_page(st, addr, 2, p4d_val(*p4d), NULL);
}
}
}
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
unsigned long start)
{
pgd_t *pgd = pgd_offset(mm, 0UL);
unsigned i;
unsigned long addr;
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = start + i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
walk_p4d(st, pgd, addr);
} else {
note_page(st, addr, 1, pgd_val(*pgd), NULL);
}
}
}
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
struct pg_state st = {
.seq = m,
.marker = info->markers,
.check_wx = false,
};
walk_pgd(&st, info->mm, info->base_addr);
note_page(&st, 0, 0, 0, NULL);
}
static void __init ptdump_initialize(void)
{
unsigned i, j;
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
if (pg_level[i].bits)
for (j = 0; j < pg_level[i].num; j++) {
pg_level[i].mask |= pg_level[i].bits[j].mask;
if (pg_level[i].bits[j].ro_bit)
pg_level[i].ro_bit = &pg_level[i].bits[j];
if (pg_level[i].bits[j].nx_bit)
pg_level[i].nx_bit = &pg_level[i].bits[j];
}
#ifdef CONFIG_KASAN
address_markers[4].start_address = VMALLOC_START;
#else
address_markers[2].start_address = VMALLOC_START;
#endif
}
static struct ptdump_info kernel_ptdump_info = {
.mm = &init_mm,
.markers = address_markers,
.base_addr = 0,
};
void ptdump_check_wx(void)
{
struct pg_state st = {
.seq = NULL,
.marker = (struct addr_marker[]) {
{ 0, NULL},
{ -1, NULL},
},
.check_wx = true,
};
walk_pgd(&st, &init_mm, 0);
note_page(&st, 0, 0, 0, NULL);
if (st.wx_pages)
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
st.wx_pages);
else
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
static int __init ptdump_init(void)
{
ptdump_initialize();
ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
return 0;
}
__initcall(ptdump_init);
| linux-master | arch/arm/mm/dump.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/copypage-v4wt.S
*
* Copyright (C) 1995-1999 Russell King
*
* This is for CPUs with a writethrough cache and 'flush ID cache' is
* the only supported cache operation.
*/
#include <linux/init.h>
#include <linux/highmem.h>
/*
* ARMv4 optimised copy_user_highpage
*
* Since we have writethrough caches, we don't have to worry about
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
static void v4wt_copy_user_page(void *kto, const void *kfrom)
{
int tmp;
asm volatile ("\
.syntax unified\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1: stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, %2, c7, c7, 0 @ flush ID cache"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
: "2" (PAGE_SIZE / 64)
: "r3", "r4", "ip", "lr");
}
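/*
 * Editor's note: each pass of the software-pipelined loop above stores
 * 64 bytes (four stmia of 16 bytes each), which is why %2 is seeded with
 * PAGE_SIZE / 64.
 */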
void v4wt_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to);
kfrom = kmap_atomic(from);
v4wt_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom);
kunmap_atomic(kto);
}
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1\n\
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr);
}
struct cpu_user_fns v4wt_user_fns __initdata = {
.cpu_clear_user_highpage = v4wt_clear_user_highpage,
.cpu_copy_user_highpage = v4wt_copy_user_highpage,
};
| linux-master | arch/arm/mm/copypage-v4wt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2016 Socionext Inc.
* Author: Masahiro Yamada <[email protected]>
*/
#define pr_fmt(fmt) "uniphier: " fmt
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>
/* control registers */
#define UNIPHIER_SSCC 0x0 /* Control Register */
#define UNIPHIER_SSCC_BST BIT(20) /* UCWG burst read */
#define UNIPHIER_SSCC_ACT BIT(19) /* Inst-Data separate */
#define UNIPHIER_SSCC_WTG BIT(18) /* WT gathering on */
#define UNIPHIER_SSCC_PRD BIT(17) /* enable pre-fetch */
#define UNIPHIER_SSCC_ON BIT(0) /* enable cache */
#define UNIPHIER_SSCLPDAWCR 0x30 /* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR 0x34 /* Instruction Active Way Control */
/* revision registers */
#define UNIPHIER_SSCID 0x0 /* ID Register */
/* operation registers */
#define UNIPHIER_SSCOPE 0x244 /* Cache Operation Primitive Entry */
#define UNIPHIER_SSCOPE_CM_INV 0x0 /* invalidate */
#define UNIPHIER_SSCOPE_CM_CLEAN 0x1 /* clean */
#define UNIPHIER_SSCOPE_CM_FLUSH 0x2 /* flush */
#define UNIPHIER_SSCOPE_CM_SYNC 0x8 /* sync (drain bufs) */
#define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
#define UNIPHIER_SSCOQM 0x248 /* Cache Operation Queue Mode */
#define UNIPHIER_SSCOQM_S_MASK (0x3 << 17)
#define UNIPHIER_SSCOQM_S_RANGE (0x0 << 17)
#define UNIPHIER_SSCOQM_S_ALL (0x1 << 17)
#define UNIPHIER_SSCOQM_CE BIT(15) /* notify completion */
#define UNIPHIER_SSCOQM_CM_INV 0x0 /* invalidate */
#define UNIPHIER_SSCOQM_CM_CLEAN 0x1 /* clean */
#define UNIPHIER_SSCOQM_CM_FLUSH 0x2 /* flush */
#define UNIPHIER_SSCOQAD 0x24c /* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ 0x250 /* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF 0x25c /* Cache Operation Queue Set Complete*/
#define UNIPHIER_SSCOPPQSEF_FE BIT(1)
#define UNIPHIER_SSCOPPQSEF_OE BIT(0)
#define UNIPHIER_SSCOLPQS 0x260 /* Cache Operation Queue Status */
#define UNIPHIER_SSCOLPQS_EF BIT(2)
#define UNIPHIER_SSCOLPQS_EST BIT(1)
#define UNIPHIER_SSCOLPQS_QST BIT(0)
/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
/**
* uniphier_cache_data - UniPhier outer cache specific data
*
* @ctrl_base: virtual base address of control registers
* @rev_base: virtual base address of revision registers
* @op_base: virtual base address of operation registers
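 * @way_ctrl_base: virtual base address of active way control registers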
* @way_mask: each bit specifies if the way is present
* @nsets: number of associativity sets
* @line_size: line size in bytes
* @range_op_max_size: max size that can be handled by a single range operation
* @list: list node to include this level in the whole cache hierarchy
*/
struct uniphier_cache_data {
void __iomem *ctrl_base;
void __iomem *rev_base;
void __iomem *op_base;
void __iomem *way_ctrl_base;
u32 way_mask;
u32 nsets;
u32 line_size;
u32 range_op_max_size;
struct list_head list;
};
/*
* List of the whole outer cache hierarchy. This list is only modified during
* the early boot stage, so no mutex is taken for the access to the list.
*/
static LIST_HEAD(uniphier_cache_list);
/**
* __uniphier_cache_sync - perform a sync point for a particular cache level
*
* @data: cache controller specific data
*/
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
/* This sequence need not be atomic. Do not disable IRQ. */
writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
data->op_base + UNIPHIER_SSCOPE);
/* need a read back to confirm */
readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}
/**
* __uniphier_cache_maint_common - run a queue operation for a particular level
*
* @data: cache controller specific data
* @start: start address of range operation (don't care for "all" operation)
* @size: data size of range operation (don't care for "all" operation)
* @operation: flags to specify the desired cache operation
*/
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
unsigned long start,
unsigned long size,
u32 operation)
{
unsigned long flags;
/*
* No spin lock is necessary here because:
*
* [1] This outer cache controller is able to accept maintenance
* operations from multiple CPUs at a time in an SMP system; if a
* maintenance operation is under way and another operation is issued,
* the new one is stored in the queue. The controller performs one
* operation after another. If the queue is full, the status register,
* UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
* failed. The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
* different instances for each CPU, i.e. each CPU can track the status
* of the maintenance operations triggered by itself.
*
* [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
* SSCOQWN}, are shared between multiple CPUs, but the hardware still
* guarantees the registration sequence is atomic; the write access to
* them are arbitrated by the hardware. The first accessor to the
* register, UNIPHIER_SSCOQM, holds the access right and it is released
* by reading the status register, UNIPHIER_SSCOPPQSEF. While one CPU
* is holding the access right, other CPUs fail to register operations.
	 * One CPU should not hold the access right for a long time, so local
	 * IRQs should be disabled during the following sequence.
*/
local_irq_save(flags);
/* clear the complete notification flag */
writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);
do {
/* set cache operation */
writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
data->op_base + UNIPHIER_SSCOQM);
/* set address range if needed */
if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
}
} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
(UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
/* wait until the operation is completed */
while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
UNIPHIER_SSCOLPQS_EF))
cpu_relax();
local_irq_restore(flags);
}
static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
u32 operation)
{
__uniphier_cache_maint_common(data, 0, 0,
UNIPHIER_SSCOQM_S_ALL | operation);
__uniphier_cache_sync(data);
}
static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
unsigned long start, unsigned long end,
u32 operation)
{
unsigned long size;
/*
* If the start address is not aligned,
* perform a cache operation for the first cache-line
*/
start = start & ~(data->line_size - 1);
size = end - start;
if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/* this means a cache operation over the entire range */
__uniphier_cache_maint_all(data, operation);
return;
}
/*
* If the end address is not aligned,
* perform a cache operation for the last cache-line
*/
size = ALIGN(size, data->line_size);
while (size) {
unsigned long chunk_size = min_t(unsigned long, size,
data->range_op_max_size);
__uniphier_cache_maint_common(data, start, chunk_size,
UNIPHIER_SSCOQM_S_RANGE | operation);
start += chunk_size;
size -= chunk_size;
}
__uniphier_cache_sync(data);
}
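/*
 * Editor's worked example (hypothetical values): with a 32-byte line size,
 * a request for [0x1010, 0x5010) is widened to start = 0x1000 and
 * size = 0x4010, rounded up to 0x4020; assuming range_op_max_size is
 * large enough, that is issued as one range operation followed by one
 * sync.
 */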
static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
u32 val = 0;
if (on)
val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;
writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}
static void __init __uniphier_cache_set_active_ways(
struct uniphier_cache_data *data)
{
unsigned int cpu;
for_each_possible_cpu(cpu)
writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}
static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
u32 operation)
{
struct uniphier_cache_data *data;
list_for_each_entry(data, &uniphier_cache_list, list)
__uniphier_cache_maint_range(data, start, end, operation);
}
static void uniphier_cache_maint_all(u32 operation)
{
struct uniphier_cache_data *data;
list_for_each_entry(data, &uniphier_cache_list, list)
__uniphier_cache_maint_all(data, operation);
}
static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}
static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}
static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}
static void __init uniphier_cache_inv_all(void)
{
uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}
static void uniphier_cache_flush_all(void)
{
uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}
static void uniphier_cache_disable(void)
{
struct uniphier_cache_data *data;
list_for_each_entry_reverse(data, &uniphier_cache_list, list)
__uniphier_cache_enable(data, false);
uniphier_cache_flush_all();
}
static void __init uniphier_cache_enable(void)
{
struct uniphier_cache_data *data;
uniphier_cache_inv_all();
list_for_each_entry(data, &uniphier_cache_list, list) {
__uniphier_cache_enable(data, true);
__uniphier_cache_set_active_ways(data);
}
}
static void uniphier_cache_sync(void)
{
struct uniphier_cache_data *data;
list_for_each_entry(data, &uniphier_cache_list, list)
__uniphier_cache_sync(data);
}
static const struct of_device_id uniphier_cache_match[] __initconst = {
{ .compatible = "socionext,uniphier-system-cache" },
{ /* sentinel */ }
};
static int __init __uniphier_cache_init(struct device_node *np,
unsigned int *cache_level)
{
struct uniphier_cache_data *data;
u32 level, cache_size;
struct device_node *next_np;
int ret = 0;
if (!of_match_node(uniphier_cache_match, np)) {
pr_err("L%d: not compatible with uniphier cache\n",
*cache_level);
return -EINVAL;
}
if (of_property_read_u32(np, "cache-level", &level)) {
pr_err("L%d: cache-level is not specified\n", *cache_level);
return -EINVAL;
}
if (level != *cache_level) {
pr_err("L%d: cache-level is unexpected value %d\n",
*cache_level, level);
return -EINVAL;
}
if (!of_property_read_bool(np, "cache-unified")) {
pr_err("L%d: cache-unified is not specified\n", *cache_level);
return -EINVAL;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
!is_power_of_2(data->line_size)) {
pr_err("L%d: cache-line-size is unspecified or invalid\n",
*cache_level);
ret = -EINVAL;
goto err;
}
if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
!is_power_of_2(data->nsets)) {
pr_err("L%d: cache-sets is unspecified or invalid\n",
*cache_level);
ret = -EINVAL;
goto err;
}
if (of_property_read_u32(np, "cache-size", &cache_size) ||
cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
pr_err("L%d: cache-size is unspecified or invalid\n",
*cache_level);
ret = -EINVAL;
goto err;
}
data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
0);
data->ctrl_base = of_iomap(np, 0);
if (!data->ctrl_base) {
pr_err("L%d: failed to map control register\n", *cache_level);
ret = -ENOMEM;
goto err;
}
data->rev_base = of_iomap(np, 1);
if (!data->rev_base) {
pr_err("L%d: failed to map revision register\n", *cache_level);
ret = -ENOMEM;
goto err;
}
data->op_base = of_iomap(np, 2);
if (!data->op_base) {
pr_err("L%d: failed to map operation register\n", *cache_level);
ret = -ENOMEM;
goto err;
}
data->way_ctrl_base = data->ctrl_base + 0xc00;
if (*cache_level == 2) {
u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
/*
* The size of range operation is limited to (1 << 22) or less
* for PH-sLD8 or older SoCs.
*/
if (revision <= 0x16)
data->range_op_max_size = (u32)1 << 22;
		/*
		 * Unfortunately, the offset address of the active way
		 * control base varies from SoC to SoC.
		 */
switch (revision) {
case 0x11: /* sLD3 */
data->way_ctrl_base = data->ctrl_base + 0x870;
break;
case 0x12: /* LD4 */
case 0x16: /* sld8 */
data->way_ctrl_base = data->ctrl_base + 0x840;
break;
default:
break;
}
}
data->range_op_max_size -= data->line_size;
INIT_LIST_HEAD(&data->list);
list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */
/*
* OK, this level has been successfully initialized. Look for the next
* level cache. Do not roll back even if the initialization of the
* next level cache fails because we want to continue with available
* cache levels.
*/
next_np = of_find_next_cache_node(np);
if (next_np) {
(*cache_level)++;
ret = __uniphier_cache_init(next_np, cache_level);
}
of_node_put(next_np);
return ret;
err:
iounmap(data->op_base);
iounmap(data->rev_base);
iounmap(data->ctrl_base);
kfree(data);
return ret;
}
int __init uniphier_cache_init(void)
{
struct device_node *np = NULL;
unsigned int cache_level;
int ret = 0;
/* look for level 2 cache */
while ((np = of_find_matching_node(np, uniphier_cache_match)))
if (!of_property_read_u32(np, "cache-level", &cache_level) &&
cache_level == 2)
break;
if (!np)
return -ENODEV;
ret = __uniphier_cache_init(np, &cache_level);
of_node_put(np);
if (ret) {
/*
		 * Error out only if L2 initialization fails. Continue despite
		 * any error on L3 or outer levels because they are optional.
*/
if (cache_level == 2) {
pr_err("failed to initialize L2 cache\n");
return ret;
}
cache_level--;
ret = 0;
}
outer_cache.inv_range = uniphier_cache_inv_range;
outer_cache.clean_range = uniphier_cache_clean_range;
outer_cache.flush_range = uniphier_cache_flush_range;
outer_cache.flush_all = uniphier_cache_flush_all;
outer_cache.disable = uniphier_cache_disable;
outer_cache.sync = uniphier_cache_sync;
uniphier_cache_enable();
pr_info("enabled outer cache (cache level: %d)\n", cache_level);
return ret;
}
| linux-master | arch/arm/mm/cache-uniphier.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/alignment.c
*
* Copyright (C) 1995 Linus Torvalds
* Modifications for ARM processor (c) 1995-2001 Russell King
* Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
* - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
* Copyright (C) 1996, Cygnus Software Technologies Ltd.
*/
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
#include <asm/opcodes.h>
#include "fault.h"
#include "mm.h"
/*
* 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
* /proc/sys/debug/alignment, modified and integrated into
* Linux 2.1 by Russell King
*
* Speed optimisations and better fault handling by Russell King.
*
* *** NOTE ***
* This code is not portable to processors with late data abort handling.
*/
#define CODING_BITS(i) (i & 0x0e000000)
#define COND_BITS(i) (i & 0xf0000000)
#define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */
#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
#define LDST_U_BIT(i) (i & (1 << 23)) /* Add offset */
#define LDST_W_BIT(i) (i & (1 << 21)) /* Writeback */
#define LDST_L_BIT(i) (i & (1 << 20)) /* Load */
#define LDST_P_EQ_U(i) ((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)
#define LDSTHD_I_BIT(i) (i & (1 << 22)) /* double/half-word immed */
#define LDM_S_BIT(i) (i & (1 << 22)) /* write CPSR from SPSR */
#define RN_BITS(i) ((i >> 16) & 15) /* Rn */
#define RD_BITS(i) ((i >> 12) & 15) /* Rd */
#define RM_BITS(i) (i & 15) /* Rm */
#define REGMASK_BITS(i) (i & 0xffff)
#define OFFSET_BITS(i) (i & 0x0fff)
#define IS_SHIFT(i) (i & 0x0ff0)
#define SHIFT_BITS(i) ((i >> 7) & 0x1f)
#define SHIFT_TYPE(i) (i & 0x60)
#define SHIFT_LSL 0x00
#define SHIFT_LSR 0x20
#define SHIFT_ASR 0x40
#define SHIFT_RORRRX 0x60
#define BAD_INSTR 0xdeadc0de
/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either e800h, f000h or f800h */
#define IS_T32(hi16) \
(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
static unsigned long ai_user;
static unsigned long ai_sys;
static void *ai_sys_last_pc;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
static unsigned long cr_no_alignment;
core_param(alignment, ai_usermode, int, 0600);
#define UM_WARN (1 << 0)
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
}
static int safe_usermode(int new_usermode, bool warn)
{
/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
new_usermode |= UM_FIXUP;
if (warn)
pr_warn("alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
}
return new_usermode;
}
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
"ignored",
"warn",
"fixup",
"fixup+warn",
"signal",
"signal+warn"
};
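/*
 * Editor's note: the index into usermode_action[] is the ai_usermode bit
 * mask itself: 0 = ignored, 1 = UM_WARN, 2 = UM_FIXUP,
 * 3 = UM_WARN | UM_FIXUP, 4 = UM_SIGNAL, 5 = UM_WARN | UM_SIGNAL. These
 * are the values accepted by writes to /proc/cpu/alignment below.
 */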
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
seq_printf(m, "System:\t\t%lu (%pS)\n", ai_sys, ai_sys_last_pc);
seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
seq_printf(m, "Half:\t\t%lu\n", ai_half);
seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
return 0;
}
static int alignment_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, alignment_proc_show, NULL);
}
static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
char mode;
if (count > 0) {
if (get_user(mode, buffer))
return -EFAULT;
if (mode >= '0' && mode <= '5')
ai_usermode = safe_usermode(mode - '0', true);
}
return count;
}
static const struct proc_ops alignment_proc_ops = {
.proc_open = alignment_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */
union offset_union {
unsigned long un;
signed long sn;
};
#define TYPE_ERROR 0
#define TYPE_FAULT 1
#define TYPE_LDST 2
#define TYPE_DONE 3
#ifdef __ARMEB__
#define BE 1
#define FIRST_BYTE_16 "mov %1, %1, ror #8\n"
#define FIRST_BYTE_32 "mov %1, %1, ror #24\n"
#define NEXT_BYTE "ror #24"
#else
#define BE 0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE "lsr #8"
#endif
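/*
 * Editor's sketch (not part of the original file): with BE == 0 the
 * get16 helper below assembles a halfword from two single-byte loads as
 * val = b0 | (b1 << 8); a big-endian build swaps the shifts. A plain C
 * equivalent of the little-endian case:
 */
#if 0	/* illustrative example only */
static unsigned int example_get16_le(const unsigned char *p)
{
	unsigned int val;

	val  = p[0];		/* first byte  -> bits 7:0  */
	val |= p[1] << 8;	/* second byte -> bits 15:8 */
	return val;
}
#endif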
#define __get8_unaligned_check(ins,val,addr,err) \
__asm__( \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
#define __get16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(ins,v,a,err); \
val = v << ((BE) ? 8 : 0); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 0 : 8); \
if (err) \
goto fault; \
} while (0)
#define get16_unaligned_check(val,addr) \
__get16_unaligned_check("ldrb",val,addr)
#define get16t_unaligned_check(val,addr) \
__get16_unaligned_check("ldrbt",val,addr)
#define __get32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(ins,v,a,err); \
val = v << ((BE) ? 24 : 0); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 16 : 8); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 8 : 16); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 0 : 24); \
if (err) \
goto fault; \
} while (0)
#define get32_unaligned_check(val,addr) \
__get32_unaligned_check("ldrb",val,addr)
#define get32t_unaligned_check(val,addr) \
__get32_unaligned_check("ldrbt",val,addr)
#define __put16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_16 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, #1\n" \
" b 3b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
goto fault; \
} while (0)
#define put16_unaligned_check(val,addr) \
__put16_unaligned_check("strb",val,addr)
#define put16t_unaligned_check(val,addr) \
__put16_unaligned_check("strbt",val,addr)
#define __put32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_32 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
ARM( "2: "ins" %1, [%2], #1\n" ) \
THUMB( "2: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
ARM( "3: "ins" %1, [%2], #1\n" ) \
THUMB( "3: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
"6: mov %0, #1\n" \
" b 5b\n" \
" .popsection\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
goto fault; \
} while (0)
#define put32_unaligned_check(val,addr) \
__put32_unaligned_check("strb", val, addr)
#define put32t_unaligned_check(val,addr) \
__put32_unaligned_check("strbt", val, addr)
static void
do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
{
if (!LDST_U_BIT(instr))
offset.un = -offset.un;
if (!LDST_P_BIT(instr))
addr += offset.un;
if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
regs->uregs[RN_BITS(instr)] = addr;
}
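/*
 * Editor's worked example: for a post-indexed "ldr r0, [r1], #4"
 * (P = 0, U = 1, W = 0), addr arrives as the original r1; the code above
 * adds the offset and, because P is clear, writes the advanced address
 * back to r1. For a pre-indexed "ldr r0, [r1, #4]!" (P = 1, W = 1) the
 * fault address already includes the offset, so it is written back
 * unmodified.
 */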
static int
do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
ai_half += 1;
if (user_mode(regs))
goto user;
if (LDST_L_BIT(instr)) {
unsigned long val;
get16_unaligned_check(val, addr);
/* signed half-word? */
if (instr & 0x40)
val = (signed long)((signed short) val);
regs->uregs[rd] = val;
} else
put16_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
user:
if (LDST_L_BIT(instr)) {
unsigned long val;
unsigned int __ua_flags = uaccess_save_and_enable();
get16t_unaligned_check(val, addr);
uaccess_restore(__ua_flags);
/* signed half-word? */
if (instr & 0x40)
val = (signed long)((signed short) val);
regs->uregs[rd] = val;
} else {
unsigned int __ua_flags = uaccess_save_and_enable();
put16t_unaligned_check(regs->uregs[rd], addr);
uaccess_restore(__ua_flags);
}
return TYPE_LDST;
fault:
return TYPE_FAULT;
}
static int
do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
unsigned int rd2;
int load;
if ((instr & 0xfe000000) == 0xe8000000) {
/* ARMv7 Thumb-2 32-bit LDRD/STRD */
rd2 = (instr >> 8) & 0xf;
load = !!(LDST_L_BIT(instr));
} else if (((rd & 1) == 1) || (rd == 14))
goto bad;
else {
load = ((instr & 0xf0) == 0xd0);
rd2 = rd + 1;
}
ai_dword += 1;
if (user_mode(regs))
goto user;
if (load) {
unsigned long val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32_unaligned_check(val, addr + 4);
regs->uregs[rd2] = val;
} else {
put32_unaligned_check(regs->uregs[rd], addr);
put32_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
user:
if (load) {
unsigned long val, val2;
unsigned int __ua_flags = uaccess_save_and_enable();
get32t_unaligned_check(val, addr);
get32t_unaligned_check(val2, addr + 4);
uaccess_restore(__ua_flags);
regs->uregs[rd] = val;
regs->uregs[rd2] = val2;
} else {
unsigned int __ua_flags = uaccess_save_and_enable();
put32t_unaligned_check(regs->uregs[rd], addr);
put32t_unaligned_check(regs->uregs[rd2], addr + 4);
uaccess_restore(__ua_flags);
}
return TYPE_LDST;
bad:
return TYPE_ERROR;
fault:
return TYPE_FAULT;
}
static int
do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
ai_word += 1;
if ((!LDST_P_BIT(instr) && LDST_W_BIT(instr)) || user_mode(regs))
goto trans;
if (LDST_L_BIT(instr)) {
unsigned int val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
} else
put32_unaligned_check(regs->uregs[rd], addr);
return TYPE_LDST;
trans:
if (LDST_L_BIT(instr)) {
unsigned int val;
unsigned int __ua_flags = uaccess_save_and_enable();
get32t_unaligned_check(val, addr);
uaccess_restore(__ua_flags);
regs->uregs[rd] = val;
} else {
unsigned int __ua_flags = uaccess_save_and_enable();
put32t_unaligned_check(regs->uregs[rd], addr);
uaccess_restore(__ua_flags);
}
return TYPE_LDST;
fault:
return TYPE_FAULT;
}
/*
* LDM/STM alignment handler.
*
* There are 4 variants of this instruction:
*
* B = rn pointer before instruction, A = rn pointer after instruction
* ------ increasing address ----->
* | | r0 | r1 | ... | rx | |
* PU = 01 B A
* PU = 11 B A
* PU = 00 A B
* PU = 10 A B
*/
static int
do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
{
unsigned int rd, rn, correction, nr_regs, regbits;
unsigned long eaddr, newaddr;
if (LDM_S_BIT(instr))
goto bad;
correction = 4; /* processor implementation defined */
regs->ARM_pc += correction;
ai_multi += 1;
/* count the number of registers in the mask to be transferred */
nr_regs = hweight16(REGMASK_BITS(instr)) * 4;
rn = RN_BITS(instr);
newaddr = eaddr = regs->uregs[rn];
if (!LDST_U_BIT(instr))
nr_regs = -nr_regs;
newaddr += nr_regs;
if (!LDST_U_BIT(instr))
eaddr = newaddr;
if (LDST_P_EQ_U(instr)) /* U = P */
eaddr += 4;
/*
* For alignment faults on the ARM922T/ARM920T the MMU makes
* the FSR (and hence addr) equal to the updated base address
* of the multiple access rather than the restored value.
	 * Switch this message off if we've got an ARM92[02], otherwise
* [ls]dm alignment faults are noisy!
*/
#if !(defined CONFIG_CPU_ARM922T) && !(defined CONFIG_CPU_ARM920T)
/*
* This is a "hint" - we already have eaddr worked out by the
* processor for us.
*/
if (addr != eaddr) {
pr_err("LDMSTM: PC = %08lx, instr = %08x, "
"addr = %08lx, eaddr = %08lx\n",
instruction_pointer(regs), instr, addr, eaddr);
show_regs(regs);
}
#endif
if (user_mode(regs)) {
unsigned int __ua_flags = uaccess_save_and_enable();
for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
regbits >>= 1, rd += 1)
if (regbits & 1) {
if (LDST_L_BIT(instr)) {
unsigned int val;
get32t_unaligned_check(val, eaddr);
regs->uregs[rd] = val;
} else
put32t_unaligned_check(regs->uregs[rd], eaddr);
eaddr += 4;
}
uaccess_restore(__ua_flags);
} else {
for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
regbits >>= 1, rd += 1)
if (regbits & 1) {
if (LDST_L_BIT(instr)) {
unsigned int val;
get32_unaligned_check(val, eaddr);
regs->uregs[rd] = val;
} else
put32_unaligned_check(regs->uregs[rd], eaddr);
eaddr += 4;
}
}
if (LDST_W_BIT(instr))
regs->uregs[rn] = newaddr;
if (!LDST_L_BIT(instr) || !(REGMASK_BITS(instr) & (1 << 15)))
regs->ARM_pc -= correction;
return TYPE_DONE;
fault:
regs->ARM_pc -= correction;
return TYPE_FAULT;
bad:
pr_err("Alignment trap: not handling ldm with s-bit set\n");
return TYPE_ERROR;
}
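/*
 * Editor's worked example (hypothetical register values): for
 * "ldmia r1!, {r2-r5}" with r1 = 0x1002 (P = 0, U = 1, W = 1), nr_regs
 * becomes 4 * 4 = 16 so newaddr = 0x1012; U is set, so eaddr keeps the
 * old base and the four words are fixed up at 0x1002, 0x1006, 0x100a and
 * 0x100e. P != U, so no extra +4 correction applies, and writeback
 * leaves 0x1012 in r1.
 */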
/*
* Convert Thumb ld/st instruction forms to equivalent ARM instructions so
* we can reuse ARM userland alignment fault fixups for Thumb.
*
* This implementation was initially based on the algorithm found in
 * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of the same,
 * converting only Thumb ld/st instruction forms to equivalent ARM forms.
*
* NOTES:
* 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
 * 2. If for some reason we're passed a non-ld/st Thumb instruction to
* decode, we return 0xdeadc0de. This should never happen under normal
* circumstances but if it does, we've got other problems to deal with
* elsewhere and we obviously can't fix those problems here.
*/
static unsigned long
thumb2arm(u16 tinstr)
{
u32 L = (tinstr & (1<<11)) >> 11;
switch ((tinstr & 0xf800) >> 11) {
/* 6.5.1 Format 1: */
case 0x6000 >> 11: /* 7.1.52 STR(1) */
case 0x6800 >> 11: /* 7.1.26 LDR(1) */
case 0x7000 >> 11: /* 7.1.55 STRB(1) */
case 0x7800 >> 11: /* 7.1.30 LDRB(1) */
return 0xe5800000 |
((tinstr & (1<<12)) << (22-12)) | /* fixup */
(L<<20) | /* L==1? */
((tinstr & (7<<0)) << (12-0)) | /* Rd */
((tinstr & (7<<3)) << (16-3)) | /* Rn */
((tinstr & (31<<6)) >> /* immed_5 */
(6 - ((tinstr & (1<<12)) ? 0 : 2)));
case 0x8000 >> 11: /* 7.1.57 STRH(1) */
case 0x8800 >> 11: /* 7.1.32 LDRH(1) */
return 0xe1c000b0 |
(L<<20) | /* L==1? */
((tinstr & (7<<0)) << (12-0)) | /* Rd */
((tinstr & (7<<3)) << (16-3)) | /* Rn */
((tinstr & (7<<6)) >> (6-1)) | /* immed_5[2:0] */
((tinstr & (3<<9)) >> (9-8)); /* immed_5[4:3] */
/* 6.5.1 Format 2: */
case 0x5000 >> 11:
case 0x5800 >> 11:
{
static const u32 subset[8] = {
0xe7800000, /* 7.1.53 STR(2) */
0xe18000b0, /* 7.1.58 STRH(2) */
0xe7c00000, /* 7.1.56 STRB(2) */
0xe19000d0, /* 7.1.34 LDRSB */
0xe7900000, /* 7.1.27 LDR(2) */
0xe19000b0, /* 7.1.33 LDRH(2) */
0xe7d00000, /* 7.1.31 LDRB(2) */
0xe19000f0 /* 7.1.35 LDRSH */
};
return subset[(tinstr & (7<<9)) >> 9] |
((tinstr & (7<<0)) << (12-0)) | /* Rd */
((tinstr & (7<<3)) << (16-3)) | /* Rn */
((tinstr & (7<<6)) >> (6-0)); /* Rm */
}
/* 6.5.1 Format 3: */
case 0x4800 >> 11: /* 7.1.28 LDR(3) */
/* NOTE: This case is not technically possible. We're
* loading 32-bit memory data via PC relative
* addressing mode. So we can and should eliminate
* this case. But I'll leave it here for now.
*/
return 0xe59f0000 |
((tinstr & (7<<8)) << (12-8)) | /* Rd */
((tinstr & 255) << (2-0)); /* immed_8 */
/* 6.5.1 Format 4: */
case 0x9000 >> 11: /* 7.1.54 STR(3) */
case 0x9800 >> 11: /* 7.1.29 LDR(4) */
return 0xe58d0000 |
(L<<20) | /* L==1? */
((tinstr & (7<<8)) << (12-8)) | /* Rd */
((tinstr & 255) << 2); /* immed_8 */
/* 6.6.1 Format 1: */
case 0xc000 >> 11: /* 7.1.51 STMIA */
case 0xc800 >> 11: /* 7.1.25 LDMIA */
{
u32 Rn = (tinstr & (7<<8)) >> 8;
u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;
return 0xe8800000 | W | (L<<20) | (Rn<<16) |
(tinstr&255);
}
/* 6.6.1 Format 2: */
case 0xb000 >> 11: /* 7.1.48 PUSH */
case 0xb800 >> 11: /* 7.1.47 POP */
if ((tinstr & (3 << 9)) == 0x0400) {
static const u32 subset[4] = {
0xe92d0000, /* STMDB sp!,{registers} */
0xe92d4000, /* STMDB sp!,{registers,lr} */
0xe8bd0000, /* LDMIA sp!,{registers} */
0xe8bd8000 /* LDMIA sp!,{registers,pc} */
};
return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
(tinstr & 255); /* register_list */
}
fallthrough; /* for illegal instruction case */
default:
return BAD_INSTR;
}
}
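/*
 * Editor's worked example (computed by hand from the encodings above):
 * the Thumb LDR(1) halfword 0x6848, i.e. "ldr r0, [r1, #4]" (imm5 = 1,
 * Rn = 1, Rd = 0), matches Format 1 and expands to the ARM word
 * 0xe5910004, the same "ldr r0, [r1, #4]" in ARM form.
 */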
/*
* Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
* handlable by ARM alignment handler, also find the corresponding handler,
* so that we can reuse ARM userland alignment fault fixups for Thumb.
*
* @pinstr: original Thumb-2 instruction; returns new handlable instruction
* @regs: register context.
* @poffset: return offset from faulted addr for later writeback
*
* NOTES:
* 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
* 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
*/
static void *
do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
union offset_union *poffset)
{
u32 instr = *pinstr;
u16 tinst1 = (instr >> 16) & 0xffff;
u16 tinst2 = instr & 0xffff;
switch (tinst1 & 0xffe0) {
/* A6.3.5 Load/Store multiple */
case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
case 0xe8a0: /* ...above writeback version */
case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */
case 0xe920: /* ...above writeback version */
		/* no offset decision needed since the handler calculates it */
return do_alignment_ldmstm;
case 0xf840: /* POP/PUSH T3 (single register) */
if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
u32 L = !!(LDST_L_BIT(instr));
const u32 subset[2] = {
0xe92d0000, /* STMDB sp!,{registers} */
0xe8bd0000, /* LDMIA sp!,{registers} */
};
*pinstr = subset[L] | (1<<RD_BITS(instr));
return do_alignment_ldmstm;
}
/* Else fall through for illegal instruction case */
break;
/* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
case 0xe860:
case 0xe960:
case 0xe8e0:
case 0xe9e0:
poffset->un = (tinst2 & 0xff) << 2;
fallthrough;
case 0xe940:
case 0xe9c0:
return do_alignment_ldrdstrd;
/*
* No need to handle load/store instructions up to word size
* since ARMv6 and later CPUs can perform unaligned accesses.
*/
default:
break;
}
return NULL;
}
static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
{
u32 instr = 0;
int fault;
if (user_mode(regs))
fault = get_user(instr, ip);
else
fault = get_kernel_nofault(instr, ip);
*inst = __mem_to_opcode_arm(instr);
return fault;
}
static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
{
u16 instr = 0;
int fault;
if (user_mode(regs))
fault = get_user(instr, ip);
else
fault = get_kernel_nofault(instr, ip);
*inst = __mem_to_opcode_thumb16(instr);
return fault;
}
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
union offset_union offset;
unsigned long instrptr;
int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
u32 instr = 0;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
int fault;
if (interrupts_enabled(regs))
local_irq_enable();
instrptr = instruction_pointer(regs);
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
fault = alignment_get_thumb(regs, ptr, &tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2;
fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
}
} else {
fault = alignment_get_arm(regs, (void *)instrptr, &instr);
}
if (fault) {
type = TYPE_FAULT;
goto bad_or_fault;
}
if (user_mode(regs))
goto user;
ai_sys += 1;
ai_sys_last_pc = (void *)instruction_pointer(regs);
fixup:
regs->ARM_pc += isize;
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
if (LDSTHD_I_BIT(instr))
offset.un = (instr & 0xf00) >> 4 | (instr & 15);
else
offset.un = regs->uregs[RM_BITS(instr)];
if ((instr & 0x000000f0) == 0x000000b0 || /* LDRH, STRH */
(instr & 0x001000f0) == 0x001000f0) /* LDRSH */
handler = do_alignment_ldrhstrh;
else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
(instr & 0x001000f0) == 0x000000f0) /* STRD */
handler = do_alignment_ldrdstrd;
else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */
goto swp;
else
goto bad;
break;
case 0x04000000: /* ldr or str immediate */
if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
goto bad;
offset.un = OFFSET_BITS(instr);
handler = do_alignment_ldrstr;
break;
case 0x06000000: /* ldr or str register */
offset.un = regs->uregs[RM_BITS(instr)];
if (IS_SHIFT(instr)) {
unsigned int shiftval = SHIFT_BITS(instr);
switch(SHIFT_TYPE(instr)) {
case SHIFT_LSL:
offset.un <<= shiftval;
break;
case SHIFT_LSR:
offset.un >>= shiftval;
break;
case SHIFT_ASR:
offset.sn >>= shiftval;
break;
case SHIFT_RORRRX:
if (shiftval == 0) {
offset.un >>= 1;
if (regs->ARM_cpsr & PSR_C_BIT)
offset.un |= 1 << 31;
} else
offset.un = offset.un >> shiftval |
offset.un << (32 - shiftval);
break;
}
}
handler = do_alignment_ldrstr;
break;
case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
if (thumb2_32b) {
offset.un = 0;
handler = do_alignment_t32_to_handler(&instr, regs, &offset);
} else {
offset.un = 0;
handler = do_alignment_ldmstm;
}
break;
default:
goto bad;
}
if (!handler)
goto bad;
type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT) {
regs->ARM_pc -= isize;
goto bad_or_fault;
}
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
if (thumb_mode(regs))
regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
return 0;
bad_or_fault:
if (type == TYPE_ERROR)
goto bad;
/*
* We got a fault - fix it up, or die.
*/
do_bad_area(addr, fsr, regs);
return 0;
swp:
pr_err("Alignment trap: not handling swp instruction\n");
bad:
/*
* Oops, we didn't handle the instruction.
*/
pr_err("Alignment trap: not handling instruction "
"%0*x at [<%08lx>]\n",
isize << 1,
isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
return 1;
user:
ai_user += 1;
if (ai_usermode & UM_WARN)
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
isize << 1,
isize == 2 ? tinstr : instr,
addr, fsr);
if (ai_usermode & UM_FIXUP)
goto fixup;
if (ai_usermode & UM_SIGNAL) {
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
} else {
/*
* We're about to disable the alignment trap and return to
* user space. But if an interrupt occurs before actually
* reaching user space, then the IRQ vector entry code will
* notice that we were still in kernel space and therefore
* the alignment trap won't be re-enabled in that case as it
* is presumed to be always on from kernel space.
* Let's prevent that race by disabling interrupts here (they
* are disabled on the way back to user space anyway in
* entry-common.S) and disable the alignment trap only if
* there is no work pending for this thread.
*/
raw_local_irq_disable();
if (!(read_thread_flags() & _TIF_WORK_MASK))
set_cr(cr_no_alignment);
}
return 0;
}
static int __init noalign_setup(char *__unused)
{
set_cr(__clear_cr(CR_A));
return 1;
}
__setup("noalign", noalign_setup);
/*
* This needs to be done after sysctl_init_bases(), otherwise sys/ will be
* overwritten. Actually, this shouldn't be in sys/ at all since
* it isn't a sysctl, and it doesn't contain sysctl information.
* We now locate it in /proc/cpu/alignment instead.
*/
static int __init alignment_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
&alignment_proc_ops);
if (!res)
return -ENOMEM;
#endif
if (cpu_is_v6_unaligned()) {
set_cr(__clear_cr(CR_A));
ai_usermode = safe_usermode(ai_usermode, false);
}
cr_no_alignment = get_cr() & ~CR_A;
hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
/*
* ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
* fault, not as alignment error.
*
* TODO: handle ARMv6K properly. Runtime check for 'K' extension is
* needed.
*/
if (cpu_architecture() <= CPU_ARCH_ARMv6) {
hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
}
return 0;
}
fs_initcall(alignment_init);
| linux-master | arch/arm/mm/alignment.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/copypage-v4wb.c
*
* Copyright (C) 1995-1999 Russell King
*/
#include <linux/init.h>
#include <linux/highmem.h>
/*
* ARMv4 optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
static void v4wb_copy_user_page(void *kto, const void *kfrom)
{
int tmp;
asm volatile ("\
.syntax unified\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
: "2" (PAGE_SIZE / 64)
: "r3", "r4", "ip", "lr");
}
void v4wb_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to);
kfrom = kmap_atomic(from);
flush_cache_page(vma, vaddr, page_to_pfn(from));
v4wb_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom);
kunmap_atomic(kto);
}
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1\n\
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr);
}
struct cpu_user_fns v4wb_user_fns __initdata = {
.cpu_clear_user_highpage = v4wb_clear_user_highpage,
.cpu_copy_user_highpage = v4wb_copy_user_highpage,
};
| linux-master | arch/arm/mm/copypage-v4wb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/lib/copypage-armv4mc.S
*
* Copyright (C) 1995-2005 Russell King
*
* This handles the mini data cache, as found on SA11x0 and XScale
 * processors. When we copy a user page, we map it in such a way
* that accesses to this page will not touch the main data cache, but
* will be cached in the mini data cache. This prevents us thrashing
* the main data cache on page faults.
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include "mm.h"
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
* ARMv4 mini-dcache optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
static void mc_copy_user_page(void *from, void *to)
{
int tmp;
asm volatile ("\
.syntax unified\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmia %0!, {r2, r3, ip, lr} @ 4+1\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmiane %0!, {r2, r3, ip, lr} @ 4\n\
bne 1b @ "
: "+&r" (from), "+&r" (to), "=&r" (tmp)
: "2" (PAGE_SIZE / 64)
: "r2", "r3", "ip", "lr");
}
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
struct folio *src = page_folio(from);
void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &src->flags))
__flush_dcache_folio(folio_flush_mapping(src), src);
raw_spin_lock(&minicache_lock);
set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto);
}
/*
* ARMv4 optimised clear_user_page
*/
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr);
}
struct cpu_user_fns v4_mc_user_fns __initdata = {
.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
.cpu_copy_user_highpage = v4_mc_copy_user_highpage,
};
| linux-master | arch/arm/mm/copypage-v4mc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/lib/copypage-fa.S
*
* Copyright (C) 2005 Faraday Corp.
* Copyright (C) 2008-2009 Paulius Zaleckas <[email protected]>
*
* Based on copypage-v4wb.S:
* Copyright (C) 1995-1999 Russell King
*/
#include <linux/init.h>
#include <linux/highmem.h>
/*
* Faraday optimised copy_user_page
*/
static void fa_copy_user_page(void *kto, const void *kfrom)
{
int tmp;
asm volatile ("\
1: ldmia %1!, {r3, r4, ip, lr} @ 4\n\
stmia %0, {r3, r4, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
add %0, %0, #16 @ 1\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
stmia %0, {r3, r4, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
add %0, %0, #16 @ 1\n\
subs %2, %2, #1 @ 1\n\
bne 1b @ 1\n\
mcr p15, 0, %2, c7, c10, 4 @ 1 drain WB"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
: "2" (PAGE_SIZE / 32)
: "r3", "r4", "ip", "lr");
}
void fa_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to);
kfrom = kmap_atomic(from);
fa_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom);
kunmap_atomic(kto);
}
/*
* Faraday optimised clear_user_page
*
* Same story as above.
*/
void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: stmia %0, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
add %0, %0, #16 @ 1\n\
stmia %0, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
add %0, %0, #16 @ 1\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1\n\
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr);
}
struct cpu_user_fns fa_user_fns __initdata = {
.cpu_clear_user_highpage = fa_clear_user_highpage,
.cpu_copy_user_highpage = fa_copy_user_highpage,
};
| linux-master | arch/arm/mm/copypage-fa.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
*
* Copyright (C) 2008 Marvell Semiconductor
*
* References:
* - PJ1 CPU Core Datasheet,
* Document ID MV-S104837-01, Rev 0.7, January 24 2008.
* - PJ4 CPU Core Datasheet,
* Document ID MV-S105190-00, Rev 0.7, March 14 2008.
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-tauros2.h>
/* CP15 PJ4 Control configuration register */
#define CCR_L2C_PREFETCH_DISABLE BIT(24)
#define CCR_L2C_ECC_ENABLE BIT(23)
#define CCR_L2C_WAY7_4_DISABLE BIT(21)
#define CCR_L2C_BURST8_ENABLE BIT(20)
/*
* When Tauros2 is used on a CPU that supports the v7 hierarchical
* cache operations, the cache handling code in proc-v7.S takes care
* of everything, including handling DMA coherency.
*
* So, we only need to register outer cache operations here if we're
* being used on a pre-v7 CPU, and we only need to build support for
* outer cache operations into the kernel image if the kernel has been
* configured to support a pre-v7 CPU.
*/
#ifdef CONFIG_CPU_32v5
/*
* Low-level cache maintenance operations.
*/
static inline void tauros2_clean_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}
static inline void tauros2_clean_inv_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}
static inline void tauros2_inv_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}
/*
* Linux primitives.
*
* Note that the end addresses passed to Linux primitives are
* noninclusive.
*/
#define CACHE_LINE_SIZE 32
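/*
 * Worked example (hypothetical addresses): invalidating [0x1010, 0x1070)
 * with 32-byte lines first clean+invalidates the partial lines at 0x1000
 * and 0x1060, then invalidates the full lines at 0x1020 and 0x1040.
 */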
static void tauros2_inv_range(unsigned long start, unsigned long end)
{
/*
* Clean and invalidate partial first cache line.
*/
if (start & (CACHE_LINE_SIZE - 1)) {
tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
start = (start | (CACHE_LINE_SIZE - 1)) + 1;
}
/*
* Clean and invalidate partial last cache line.
*/
if (end & (CACHE_LINE_SIZE - 1)) {
tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
end &= ~(CACHE_LINE_SIZE - 1);
}
/*
* Invalidate all full cache lines between 'start' and 'end'.
*/
while (start < end) {
tauros2_inv_pa(start);
start += CACHE_LINE_SIZE;
}
dsb();
}
static void tauros2_clean_range(unsigned long start, unsigned long end)
{
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
tauros2_clean_pa(start);
start += CACHE_LINE_SIZE;
}
dsb();
}
static void tauros2_flush_range(unsigned long start, unsigned long end)
{
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
tauros2_clean_inv_pa(start);
start += CACHE_LINE_SIZE;
}
dsb();
}
static void tauros2_disable(void)
{
__asm__ __volatile__ (
"mcr p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t"
"mrc p15, 0, %0, c1, c0, 0\n\t"
"bic %0, %0, #(1 << 26)\n\t"
"mcr p15, 0, %0, c1, c0, 0 @Disable L2 Cache\n\t"
: : "r" (0x0));
}
static void tauros2_resume(void)
{
__asm__ __volatile__ (
"mcr p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t"
"mrc p15, 0, %0, c1, c0, 0\n\t"
"orr %0, %0, #(1 << 26)\n\t"
"mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
: : "r" (0x0));
}
#endif
static inline u32 __init read_extra_features(void)
{
u32 u;
__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
return u;
}
static inline void __init write_extra_features(u32 u)
{
__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
static inline int __init cpuid_scheme(void)
{
return !!((processor_id & 0x000f0000) == 0x000f0000);
}
static inline u32 __init read_mmfr3(void)
{
u32 mmfr3;
__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));
return mmfr3;
}
static inline u32 __init read_actlr(void)
{
u32 actlr;
__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
return actlr;
}
static inline void __init write_actlr(u32 actlr)
{
__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}
static void enable_extra_feature(unsigned int features)
{
u32 u;
u = read_extra_features();
if (features & CACHE_TAUROS2_PREFETCH_ON)
u &= ~CCR_L2C_PREFETCH_DISABLE;
else
u |= CCR_L2C_PREFETCH_DISABLE;
pr_info("Tauros2: %s L2 prefetch.\n",
(features & CACHE_TAUROS2_PREFETCH_ON)
? "Enabling" : "Disabling");
if (features & CACHE_TAUROS2_LINEFILL_BURST8)
u |= CCR_L2C_BURST8_ENABLE;
else
u &= ~CCR_L2C_BURST8_ENABLE;
pr_info("Tauros2: %s burst8 line fill.\n",
(features & CACHE_TAUROS2_LINEFILL_BURST8)
? "Enabling" : "Disabling");
write_extra_features(u);
}
static void __init tauros2_internal_init(unsigned int features)
{
char *mode = NULL;
enable_extra_feature(features);
#ifdef CONFIG_CPU_32v5
if ((processor_id & 0xff0f0000) == 0x56050000) {
u32 feat;
/*
* v5 CPUs with Tauros2 have the L2 cache enable bit
* located in the CPU Extra Features register.
*/
feat = read_extra_features();
if (!(feat & 0x00400000)) {
pr_info("Tauros2: Enabling L2 cache.\n");
write_extra_features(feat | 0x00400000);
}
mode = "ARMv5";
outer_cache.inv_range = tauros2_inv_range;
outer_cache.clean_range = tauros2_clean_range;
outer_cache.flush_range = tauros2_flush_range;
outer_cache.disable = tauros2_disable;
outer_cache.resume = tauros2_resume;
}
#endif
#ifdef CONFIG_CPU_32v7
/*
* Check whether this CPU has support for the v7 hierarchical
* cache ops. (PJ4 is in its v7 personality mode if the MMFR3
* register indicates support for the v7 hierarchical cache
* ops.)
*
* (Although strictly speaking there may exist CPUs that
* implement the v7 cache ops but are only ARMv6 CPUs (due to
* not complying with all of the other ARMv7 requirements),
* there are no real-life examples of Tauros2 being used on
* such CPUs as of yet.)
*/
if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
u32 actlr;
/*
* When Tauros2 is used in an ARMv7 system, the L2
* enable bit is located in the Auxiliary System Control
* Register (which is the only register allowed by the
* ARMv7 spec to contain fine-grained cache control bits).
*/
actlr = read_actlr();
if (!(actlr & 0x00000002)) {
pr_info("Tauros2: Enabling L2 cache.\n");
write_actlr(actlr | 0x00000002);
}
mode = "ARMv7";
}
#endif
if (mode == NULL) {
pr_crit("Tauros2: Unable to detect CPU mode.\n");
return;
}
pr_info("Tauros2: L2 cache support initialised "
"in %s mode.\n", mode);
}
#ifdef CONFIG_OF
static const struct of_device_id tauros2_ids[] __initconst = {
{ .compatible = "marvell,tauros2-cache"},
{}
};
#endif
void __init tauros2_init(unsigned int features)
{
#ifdef CONFIG_OF
struct device_node *node;
int ret;
unsigned int f;
node = of_find_matching_node(NULL, tauros2_ids);
if (!node) {
pr_info("Not found marvell,tauros2-cache, disable it\n");
} else {
ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
if (ret) {
pr_info("Not found marvell,tauros-cache-features property, "
"disable extra features\n");
features = 0;
} else
features = f;
}
#endif
tauros2_internal_init(features);
}
| linux-master | arch/arm/mm/cache-tauros2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/context.c
*
* Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
* Copyright (C) 2012 ARM Limited
*
* Author: Will Deacon <[email protected]>
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
/*
* On ARMv6, we have the following structure in the Context ID:
*
* 31 7 0
* +-------------------------+-----------+
* | process ID | ASID |
* +-------------------------+-----------+
* | context ID |
* +-------------------------------------+
*
* The ASID is used to tag entries in the CPU caches and TLBs.
* The context ID is used by debuggers and trace logic, and
* should be unique within all running processes.
*
 * In big-endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
*/
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
#define NUM_USER_ASIDS ASID_FIRST_VERSION
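/*
 * With 8-bit ASIDs (as in the layout above), ASID_FIRST_VERSION is 0x100:
 * the generation count lives above bit 7, so a context ID of e.g. 0x305
 * (value for illustration) means generation 3, ASID 5.
 */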
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
cpumask_t *mask)
{
int cpu;
unsigned long flags;
u64 context_id, asid;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
context_id = mm->context.id.counter;
for_each_online_cpu(cpu) {
if (cpu == this_cpu)
continue;
/*
* We only need to send an IPI if the other CPUs are
* running the same ASID as the one being invalidated.
*/
asid = per_cpu(active_asids, cpu).counter;
if (asid == 0)
asid = per_cpu(reserved_asids, cpu);
if (context_id == asid)
cpumask_set_cpu(cpu, mask);
}
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif
#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
* no need for a reserved set of tables (the active ASID tracking prevents
* any issues across a rollover).
*/
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
u32 ttb;
/*
* Copy TTBR1 into TTBR0.
* This points at swapper_pg_dir, which contains only global
* entries so any speculative walks are perfectly safe.
*/
asm volatile(
" mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
" mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"
: "=r" (ttb));
isb();
}
#endif
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
void *t)
{
u32 contextidr;
pid_t pid;
struct thread_info *thread = t;
if (cmd != THREAD_NOTIFY_SWITCH)
return NOTIFY_DONE;
pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
asm volatile(
" mrc p15, 0, %0, c13, c0, 1\n"
" and %0, %0, %2\n"
" orr %0, %0, %1\n"
" mcr p15, 0, %0, c13, c0, 1\n"
: "=r" (contextidr), "+r" (pid)
: "I" (~ASID_MASK));
isb();
return NOTIFY_OK;
}
static struct notifier_block contextidr_notifier_block = {
.notifier_call = contextidr_notifier,
};
static int __init contextidr_notifier_init(void)
{
return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
static void flush_context(unsigned int cpu)
{
int i;
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
for_each_possible_cpu(i) {
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
/*
* If this CPU has already been through a
* rollover, but hasn't run another task in
* the meantime, we must preserve its reserved
* ASID, as this is the only trace we have of
* the process it is still running.
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
__set_bit(asid & ~ASID_MASK, asid_map);
per_cpu(reserved_asids, i) = asid;
}
/* Queue a TLB invalidate and flush the I-cache if necessary. */
cpumask_setall(&tlb_flush_pending);
if (icache_is_vivt_asid_tagged())
__flush_icache_all();
}
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
int cpu;
bool hit = false;
/*
* Iterate over the set of reserved ASIDs looking for a match.
* If we find one, then we can update our mm to use newasid
* (i.e. the same ASID in the current generation) but we can't
* exit the loop early, since we need to ensure that all copies
* of the old ASID are updated to reflect the mm. Failure to do
* so could result in us missing the reserved ASID in a future
* generation.
*/
for_each_possible_cpu(cpu) {
if (per_cpu(reserved_asids, cpu) == asid) {
hit = true;
per_cpu(reserved_asids, cpu) = newasid;
}
}
return hit;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
static u32 cur_idx = 1;
u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
u64 newasid = generation | (asid & ~ASID_MASK);
/*
* If our current ASID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
if (check_update_reserved_asid(asid, newasid))
return newasid;
/*
* We had a valid ASID in a previous life, so try to re-use
 * it if possible.
*/
asid &= ~ASID_MASK;
if (!__test_and_set_bit(asid, asid_map))
return newasid;
}
/*
* Allocate a free ASID. If we can't find one, take a note of the
* currently active ASIDs and mark the TLBs as requiring flushes.
* We always count from ASID #1, as we reserve ASID #0 to switch
* via TTBR0 and to avoid speculative page table walks from hitting
* in any partial walk caches, which could be populated from
* overlapping level-1 descriptors used to map both the module
* area and the userspace stack.
*/
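	/*
	 * Example rollover: if no zero bit is found at or above cur_idx,
	 * the generation is bumped by ASID_FIRST_VERSION, flush_context()
	 * clears the map and queues TLB flushes, and the search restarts
	 * from ASID #1.
	 */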
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
if (asid == NUM_USER_ASIDS) {
generation = atomic64_add_return(ASID_FIRST_VERSION,
&asid_generation);
flush_context(cpu);
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
}
__set_bit(asid, asid_map);
cur_idx = asid;
cpumask_clear(mm_cpumask(mm));
return asid | generation;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
u64 asid;
check_vmalloc_seq(mm);
/*
 * We cannot update the pgd and the ASID atomically with classic
* MMU, so switch exclusively to global mappings to avoid
* speculative page table walking with the wrong TTBR.
*/
cpu_set_reserved_ttbr0();
asid = atomic64_read(&mm->context.id);
if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
&& atomic64_xchg(&per_cpu(active_asids, cpu), asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
asid = atomic64_read(&mm->context.id);
if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
asid = new_context(mm, cpu);
atomic64_set(&mm->context.id, asid);
}
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
}
atomic64_set(&per_cpu(active_asids, cpu), asid);
cpumask_set_cpu(cpu, mm_cpumask(mm));
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
| linux-master | arch/arm/mm/context.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
*
* Copyright (C) 2007 ARM Limited
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>
#include "cache-tauros3.h"
struct l2c_init_data {
const char *type;
unsigned way_size_0;
unsigned num_lock;
void (*of_parse)(const struct device_node *, u32 *, u32 *);
void (*enable)(void __iomem *, unsigned);
void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
void (*save)(void __iomem *);
void (*configure)(void __iomem *);
void (*unlock)(void __iomem *, unsigned);
struct outer_cache_fns outer_cache;
};
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
struct l2x0_regs l2x0_saved_regs;
static bool l2x0_bresp_disable;
static bool l2x0_flz_disable;
/*
* Common code for all cache controllers.
*/
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
cpu_relax();
}
/*
* By default, we write directly to secure registers. Platforms must
* override this if they are running non-secure.
*/
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
if (val == readl_relaxed(base + reg))
return;
if (outer_cache.write_sec)
outer_cache.write_sec(val, reg);
else
writel_relaxed(val, base + reg);
}
/*
* This should only be called when we have a requirement that the
* register be written due to a work-around, as platforms running
* in non-secure mode may not be able to access this register.
*/
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}
static void __l2c_op_way(void __iomem *reg)
{
writel_relaxed(l2x0_way_mask, reg);
l2c_wait_mask(reg, l2x0_way_mask);
}
static inline void l2c_unlock(void __iomem *base, unsigned num)
{
unsigned i;
for (i = 0; i < num; i++) {
writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
i * L2X0_LOCKDOWN_STRIDE);
writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
i * L2X0_LOCKDOWN_STRIDE);
}
}
static void l2c_configure(void __iomem *base)
{
l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
/*
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
*/
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
unsigned long flags;
if (outer_cache.configure)
outer_cache.configure(&l2x0_saved_regs);
else
l2x0_data->configure(base);
l2x0_data->unlock(base, num_lock);
local_irq_save(flags);
__l2c_op_way(base + L2X0_INV_WAY);
writel_relaxed(0, base + sync_reg_offset);
l2c_wait_mask(base + sync_reg_offset, 1);
local_irq_restore(flags);
l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}
static void l2c_disable(void)
{
void __iomem *base = l2x0_base;
l2x0_pmu_suspend();
outer_cache.flush_all();
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
}
static void l2c_save(void __iomem *base)
{
l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}
static void l2c_resume(void)
{
void __iomem *base = l2x0_base;
/* Do not touch the controller if already enabled. */
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
l2c_enable(base, l2x0_data->num_lock);
l2x0_pmu_resume();
}
/*
* L2C-210 specific code.
*
* The L2C-2x0 PA, set/way and sync operations are atomic, but we must
* ensure that no background operation is running. The way operations
* are all background tasks.
*
* While a background operation is in progress, any new operation is
 * ignored (it is unspecified whether this causes an error). Thankfully,
 * this controller is not used on SMP systems.
*
* Never has a different sync register other than L2X0_CACHE_SYNC, but
* we use sync_reg_offset here so we can share some of this with L2C-310.
*/
static void __l2c210_cache_sync(void __iomem *base)
{
writel_relaxed(0, base + sync_reg_offset);
}
static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
unsigned long end)
{
while (start < end) {
writel_relaxed(start, reg);
start += CACHE_LINE_SIZE;
}
}
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
}
__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c210_clean_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
start &= ~(CACHE_LINE_SIZE - 1);
__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c210_flush_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
start &= ~(CACHE_LINE_SIZE - 1);
__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c210_flush_all(void)
{
void __iomem *base = l2x0_base;
BUG_ON(!irqs_disabled());
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
__l2c210_cache_sync(base);
}
static void l2c210_sync(void)
{
__l2c210_cache_sync(l2x0_base);
}
static const struct l2c_init_data l2c210_data __initconst = {
.type = "L2C-210",
.way_size_0 = SZ_8K,
.num_lock = 1,
.enable = l2c_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
.resume = l2c_resume,
},
};
/*
* L2C-220 specific code.
*
* All operations are background operations: they have to be waited for.
* Conflicting requests generate a slave error (which will cause an
 * imprecise abort). Never uses sync_reg_offset, so we hard-code the
* sync register here.
*
* However, we can re-use the l2c210_resume call.
*/
static inline void __l2c220_cache_sync(void __iomem *base)
{
writel_relaxed(0, base + L2X0_CACHE_SYNC);
l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}
static void l2c220_op_way(void __iomem *base, unsigned reg)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + reg);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
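/*
 * Helper for the range operations below: walks the range in 4K chunks,
 * dropping and re-taking l2x0_lock between chunks so that interrupts are
 * not held off for the duration of a large range.
 */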
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
unsigned long end, unsigned long flags)
{
raw_spinlock_t *lock = &l2x0_lock;
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
l2c_wait_mask(reg, 1);
writel_relaxed(start, reg);
start += CACHE_LINE_SIZE;
}
if (blk_end < end) {
raw_spin_unlock_irqrestore(lock, flags);
raw_spin_lock_irqsave(lock, flags);
}
}
return flags;
}
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
if ((start | end) & (CACHE_LINE_SIZE - 1)) {
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
}
}
flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_clean_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
start &= ~(CACHE_LINE_SIZE - 1);
if ((end - start) >= l2x0_size) {
l2c220_op_way(base, L2X0_CLEAN_WAY);
return;
}
raw_spin_lock_irqsave(&l2x0_lock, flags);
flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_flush_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
start &= ~(CACHE_LINE_SIZE - 1);
if ((end - start) >= l2x0_size) {
l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
return;
}
raw_spin_lock_irqsave(&l2x0_lock, flags);
flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_flush_all(void)
{
l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}
static void l2c220_sync(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c220_cache_sync(l2x0_base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, num_lock);
}
static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c220_data = {
.type = "L2C-220",
.way_size_0 = SZ_8K,
.num_lock = 1,
.enable = l2c220_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
.flush_range = l2c220_flush_range,
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
.resume = l2c_resume,
},
};
/*
* L2C-310 specific code.
*
* Very similar to L2C-210, the PA, set/way and sync operations are atomic,
* and the way operations are all background tasks. However, issuing an
* operation while a background operation is in progress results in a
* SLVERR response. We can reuse:
*
* __l2c210_cache_sync (using sync_reg_offset)
* l2c210_sync
* l2c210_inv_range (if 588369 is not applicable)
* l2c210_clean_range
* l2c210_flush_range (if 588369 is not applicable)
* l2c210_flush_all (if 727915 is not applicable)
*
* Errata:
* 588369: PL310 R0P0->R1P0, fixed R2P0.
* Affects: all clean+invalidate operations
* clean and invalidate skips the invalidate step, so we need to issue
* separate operations. We also require the above debug workaround
* enclosing this code fragment on affected parts. On unaffected parts,
* we must not use this workaround without the debug register writes
* to avoid exposing a problem similar to 727915.
*
* 727915: PL310 R2P0->R3P0, fixed R3P1.
* Affects: clean+invalidate by way
* clean and invalidate by way runs in the background, and a store can
* hit the line between the clean operation and invalidate operation,
* resulting in the store being lost.
*
* 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
* Affects: 8x64-bit (double fill) line fetches
* double fill line fetches can fail to cause dirty data to be evicted
* from the cache before the new data overwrites the second line.
*
* 753970: PL310 R3P0, fixed R3P1.
* Affects: sync
* prevents merging writes after the sync operation, until another L2C
* operation is performed (or a number of other conditions.)
*
* 769419: PL310 R0P0->R3P1, fixed R3P2.
* Affects: store buffer
* store buffer is not automatically drained.
*/
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
if ((start | end) & (CACHE_LINE_SIZE - 1)) {
unsigned long flags;
/* Erratum 588369 for both clean+invalidate operations */
raw_spin_lock_irqsave(&l2x0_lock, flags);
l2c_set_debug(base, 0x03);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
writel_relaxed(start, base + L2X0_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
writel_relaxed(end, base + L2X0_INV_LINE_PA);
}
l2c_set_debug(base, 0x00);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
raw_spinlock_t *lock = &l2x0_lock;
unsigned long flags;
void __iomem *base = l2x0_base;
raw_spin_lock_irqsave(lock, flags);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
l2c_set_debug(base, 0x03);
while (start < blk_end) {
writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
writel_relaxed(start, base + L2X0_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
l2c_set_debug(base, 0x00);
if (blk_end < end) {
raw_spin_unlock_irqrestore(lock, flags);
raw_spin_lock_irqsave(lock, flags);
}
}
raw_spin_unlock_irqrestore(lock, flags);
__l2c210_cache_sync(base);
}
static void l2c310_flush_all_erratum(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
l2c_set_debug(base, 0x03);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
l2c_set_debug(base, 0x00);
__l2c210_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __init l2c310_save(void __iomem *base)
{
unsigned revision;
l2c_save(base);
l2x0_saved_regs.tag_latency = readl_relaxed(base +
L310_TAG_LATENCY_CTRL);
l2x0_saved_regs.data_latency = readl_relaxed(base +
L310_DATA_LATENCY_CTRL);
l2x0_saved_regs.filter_end = readl_relaxed(base +
L310_ADDR_FILTER_END);
l2x0_saved_regs.filter_start = readl_relaxed(base +
L310_ADDR_FILTER_START);
revision = readl_relaxed(base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
/* From r2p0, there is Prefetch offset/control register */
if (revision >= L310_CACHE_ID_RTL_R2P0)
l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
L310_PREFETCH_CTRL);
/* From r3p0, there is Power control register */
if (revision >= L310_CACHE_ID_RTL_R3P0)
l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
L310_POWER_CTRL);
}
static void l2c310_configure(void __iomem *base)
{
unsigned revision;
l2c_configure(base);
/* restore pl310 setup */
l2c_write_sec(l2x0_saved_regs.tag_latency, base,
L310_TAG_LATENCY_CTRL);
l2c_write_sec(l2x0_saved_regs.data_latency, base,
L310_DATA_LATENCY_CTRL);
l2c_write_sec(l2x0_saved_regs.filter_end, base,
L310_ADDR_FILTER_END);
l2c_write_sec(l2x0_saved_regs.filter_start, base,
L310_ADDR_FILTER_START);
revision = readl_relaxed(base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
if (revision >= L310_CACHE_ID_RTL_R2P0)
l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
L310_PREFETCH_CTRL);
if (revision >= L310_CACHE_ID_RTL_R3P0)
l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
L310_POWER_CTRL);
}
static int l2c310_starting_cpu(unsigned int cpu)
{
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
return 0;
}
static int l2c310_dying_cpu(unsigned int cpu)
{
set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
return 0;
}
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
u32 aux = l2x0_saved_regs.aux_ctrl;
if (rev >= L310_CACHE_ID_RTL_R2P0) {
if (cortex_a9 && !l2x0_bresp_disable) {
aux |= L310_AUX_CTRL_EARLY_BRESP;
pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
aux &= ~L310_AUX_CTRL_EARLY_BRESP;
}
}
if (cortex_a9 && !l2x0_flz_disable) {
u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
u32 acr = get_auxcr();
pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
}
} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, num_lock);
/* Read back resulting AUX_CTRL value as it could have been altered. */
aux = readl_relaxed(base + L2X0_AUX_CTRL);
if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
}
/* r3p0 or later has power control register */
if (rev >= L310_CACHE_ID_RTL_R3P0) {
u32 power_ctrl;
power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
}
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
"arm/l2x0:starting", l2c310_starting_cpu,
l2c310_dying_cpu);
}
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
struct outer_cache_fns *fns)
{
unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
const char *errata[8];
unsigned n = 0;
if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
revision < L310_CACHE_ID_RTL_R2P0 &&
/* For bcm compatibility */
fns->inv_range == l2c210_inv_range) {
fns->inv_range = l2c310_inv_range_erratum;
fns->flush_range = l2c310_flush_range_erratum;
errata[n++] = "588369";
}
if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
revision >= L310_CACHE_ID_RTL_R2P0 &&
revision < L310_CACHE_ID_RTL_R3P1) {
fns->flush_all = l2c310_flush_all_erratum;
errata[n++] = "727915";
}
if (revision >= L310_CACHE_ID_RTL_R3P0 &&
revision < L310_CACHE_ID_RTL_R3P2) {
u32 val = l2x0_saved_regs.prefetch_ctrl;
if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
l2x0_saved_regs.prefetch_ctrl = val;
errata[n++] = "752271";
}
}
if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
revision == L310_CACHE_ID_RTL_R3P0) {
sync_reg_offset = L2X0_DUMMY_REG;
errata[n++] = "753970";
}
if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
errata[n++] = "769419";
if (n) {
unsigned i;
pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
for (i = 0; i < n; i++)
pr_cont(" %s", errata[i]);
pr_cont(" enabled\n");
}
}
static void l2c310_disable(void)
{
/*
* If full-line-of-zeros is enabled, we must first disable it in the
* Cortex-A9 auxiliary control register before disabling the L2 cache.
*/
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
l2c_disable();
}
static void l2c310_resume(void)
{
l2c_resume();
/* Re-enable full-line-of-zeros for Cortex-A9 */
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c310_init_fns __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
.num_lock = 8,
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.sync = l2c210_sync,
.resume = l2c310_resume,
},
};
static int __init __l2c_init(const struct l2c_init_data *data,
u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
{
struct outer_cache_fns fns;
unsigned way_size_bits, ways;
u32 aux, old_aux;
/*
* Save the pointer globally so that callbacks which do not receive
* context from callers can access the structure.
*/
l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
if (!l2x0_data)
return -ENOMEM;
/*
* Sanity check the aux values. aux_mask is the bits we preserve
* from reading the hardware register, and aux_val is the bits we
* set.
*/
if (aux_val & aux_mask)
pr_alert("L2C: platform provided aux values permit register corruption.\n");
old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
if (old_aux != aux)
pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
old_aux, aux);
/* Determine the number of ways */
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
if (aux & (1 << 16))
ways = 16;
else
ways = 8;
break;
case L2X0_CACHE_ID_PART_L210:
case L2X0_CACHE_ID_PART_L220:
ways = (aux >> 13) & 0xf;
break;
case AURORA_CACHE_ID:
ways = (aux >> 13) & 0xf;
ways = 2 << ((ways + 1) >> 2);
break;
default:
/* Assume unknown chips have 8 ways */
ways = 8;
break;
}
l2x0_way_mask = (1 << ways) - 1;
/*
* way_size_0 is the size that a way_size value of zero would be
* given the calculation: way_size = way_size_0 << way_size_bits.
* So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
* then way_size_0 would be 8k.
*
* L2 cache size = number of ways * way size.
*/
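/*
 * Worked example (values for illustration): with way_size_0 = 8K and a
 * way-size field of 3, each way is 8K << 3 = 64K; 16 such ways give a
 * 1MB cache.
 */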
way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
L2C_AUX_CTRL_WAY_SIZE_SHIFT;
l2x0_size = ways * (data->way_size_0 << way_size_bits);
fns = data->outer_cache;
fns.write_sec = outer_cache.write_sec;
fns.configure = outer_cache.configure;
if (data->fixup)
data->fixup(l2x0_base, cache_id, &fns);
if (nosync) {
pr_info("L2C: disabling outer sync\n");
fns.sync = NULL;
}
/*
* Check if l2x0 controller is already enabled. If we are booting
* in non-secure mode accessing the below registers will fault.
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
l2x0_saved_regs.aux_ctrl = aux;
data->enable(l2x0_base, data->num_lock);
}
outer_cache = fns;
/*
* It is strange to save the register state before initialisation,
* but hey, this is what the DT implementations decided to do.
*/
if (data->save)
data->save(l2x0_base);
/* Re-read it in case some bits are reserved. */
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
pr_info("%s cache controller enabled, %d ways, %d kB\n",
data->type, ways, l2x0_size >> 10);
pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
data->type, cache_id, aux);
l2x0_pmu_register(l2x0_base, cache_id);
return 0;
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
const struct l2c_init_data *data;
u32 cache_id;
l2x0_base = base;
cache_id = readl_relaxed(base + L2X0_CACHE_ID);
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
default:
case L2X0_CACHE_ID_PART_L210:
data = &l2c210_data;
break;
case L2X0_CACHE_ID_PART_L220:
data = &l2c220_data;
break;
case L2X0_CACHE_ID_PART_L310:
data = &l2c310_init_fns;
break;
}
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
__l2c_init(data, aux_val, aux_mask, cache_id, false);
}
#ifdef CONFIG_OF
static int l2_wt_override;
/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;
/**
* l2x0_cache_size_of_parse() - read cache size parameters from DT
* @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
* be augmented by the call (bits to be set to 0)
* @associativity: variable to return the calculated associativity in
* @max_way_size: the maximum size in bytes for the cache ways
*/
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask,
u32 *associativity,
u32 max_way_size)
{
u32 mask = 0, val = 0;
u32 cache_size = 0, sets = 0;
u32 way_size_bits = 1;
u32 way_size = 0;
u32 block_size = 0;
u32 line_size = 0;
of_property_read_u32(np, "cache-size", &cache_size);
of_property_read_u32(np, "cache-sets", &sets);
of_property_read_u32(np, "cache-block-size", &block_size);
of_property_read_u32(np, "cache-line-size", &line_size);
if (!cache_size || !sets)
return -ENODEV;
/* All these l2 caches have the same line = block size actually */
if (!line_size) {
if (block_size) {
/* If linesize is not given, it is equal to blocksize */
line_size = block_size;
} else {
/* Fall back to known size */
pr_warn("L2C OF: no cache block/line size given: "
"falling back to default size %d bytes\n",
CACHE_LINE_SIZE);
line_size = CACHE_LINE_SIZE;
}
}
if (line_size != CACHE_LINE_SIZE)
pr_warn("L2C OF: DT supplied line size %d bytes does "
"not match hardware line size of %d bytes\n",
line_size,
CACHE_LINE_SIZE);
/*
* Since:
* set size = cache size / sets
* ways = cache size / (sets * line size)
* way size = cache size / (cache size / (sets * line size))
* way size = sets * line size
* associativity = ways = cache size / way size
*/
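/*
 * Worked example (DT values for illustration): cache-size = 262144,
 * cache-sets = 2048 and 32-byte lines give way_size = 2048 * 32 = 64K
 * and associativity = 262144 / 65536 = 4.
 */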
way_size = sets * line_size;
*associativity = cache_size / way_size;
if (way_size > max_way_size) {
pr_err("L2C OF: set size %dKB is too large\n", way_size);
return -EINVAL;
}
pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
cache_size, cache_size >> 10);
pr_info("L2C OF: override line size: %d bytes\n", line_size);
pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
way_size, way_size >> 10);
pr_info("L2C OF: override associativity: %d\n", *associativity);
/*
* Calculates the bits 17:19 to set for way size:
* 512KB -> 6, 256KB -> 5, ... 16KB -> 1
*/
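/* e.g. a 256KB way: ilog2(256) - 3 = 8 - 3 = 5, matching the table above */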
way_size_bits = ilog2(way_size >> 10) - 3;
if (way_size_bits < 1 || way_size_bits > 6) {
pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
way_size);
return -EINVAL;
}
mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
return 0;
}
static void __init l2x0_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 data[2] = { 0, 0 };
u32 tag = 0;
u32 dirty = 0;
u32 val = 0, mask = 0;
u32 assoc;
int ret;
of_property_read_u32(np, "arm,tag-latency", &tag);
if (tag) {
mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
}
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1]) {
mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
}
of_property_read_u32(np, "arm,dirty-latency", &dirty);
if (dirty) {
mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
}
if (of_property_read_bool(np, "arm,parity-enable")) {
mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
val |= L2C_AUX_CTRL_PARITY_ENABLE;
} else if (of_property_read_bool(np, "arm,parity-disable")) {
mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
}
if (of_property_read_bool(np, "arm,shared-override")) {
mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
}
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
if (ret)
return;
if (assoc > 8) {
pr_err("l2x0 of: cache setting yield too high associativity\n");
pr_err("l2x0 of: %d calculated, max 8\n", assoc);
} else {
mask |= L2X0_AUX_CTRL_ASSOC_MASK;
val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
}
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
}
static const struct l2c_init_data of_l2c210_data __initconst = {
.type = "L2C-210",
.way_size_0 = SZ_8K,
.num_lock = 1,
.of_parse = l2x0_of_parse,
.enable = l2c_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
.resume = l2c_resume,
},
};
static const struct l2c_init_data of_l2c220_data __initconst = {
.type = "L2C-220",
.way_size_0 = SZ_8K,
.num_lock = 1,
.of_parse = l2x0_of_parse,
.enable = l2c220_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
.flush_range = l2c220_flush_range,
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
.resume = l2c_resume,
},
};
static void __init l2c310_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 data[3] = { 0, 0, 0 };
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
u32 assoc;
u32 prefetch;
u32 power;
u32 val;
int ret;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
l2x0_saved_regs.tag_latency =
L310_LATENCY_CTRL_RD(tag[0] - 1) |
L310_LATENCY_CTRL_WR(tag[1] - 1) |
L310_LATENCY_CTRL_SETUP(tag[2] - 1);
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1] && data[2])
l2x0_saved_regs.data_latency =
L310_LATENCY_CTRL_RD(data[0] - 1) |
L310_LATENCY_CTRL_WR(data[1] - 1) |
L310_LATENCY_CTRL_SETUP(data[2] - 1);
of_property_read_u32_array(np, "arm,filter-ranges",
filter, ARRAY_SIZE(filter));
if (filter[1]) {
l2x0_saved_regs.filter_end =
ALIGN(filter[0] + filter[1], SZ_1M);
l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
| L310_ADDR_FILTER_EN;
}
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
if (!ret) {
switch (assoc) {
case 16:
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
break;
case 8:
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
break;
default:
pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
assoc);
break;
}
}
if (of_property_read_bool(np, "arm,shared-override")) {
*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
}
if (of_property_read_bool(np, "arm,parity-enable")) {
*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
} else if (of_property_read_bool(np, "arm,parity-disable")) {
*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
}
if (of_property_read_bool(np, "arm,early-bresp-disable"))
l2x0_bresp_disable = true;
if (of_property_read_bool(np, "arm,full-line-zero-disable"))
l2x0_flz_disable = true;
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
}
ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
}
ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
if (ret == 0) {
if (!val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
}
ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
else
prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
}
ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
if (ret == 0) {
prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-data", &val);
if (ret == 0) {
if (val) {
prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
*aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
} else {
prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
}
*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-data property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-instr", &val);
if (ret == 0) {
if (val) {
prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
*aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else {
prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
}
*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-instr property value is missing\n");
}
l2x0_saved_regs.prefetch_ctrl = prefetch;
power = l2x0_saved_regs.pwr_ctrl |
L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
if (!ret) {
if (!val)
power &= ~L310_DYNAMIC_CLK_GATING_EN;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
}
ret = of_property_read_u32(np, "arm,standby-mode", &val);
if (!ret) {
if (!val)
power &= ~L310_STNDBY_MODE_EN;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
}
l2x0_saved_regs.pwr_ctrl = power;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
.num_lock = 8,
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.sync = l2c210_sync,
.resume = l2c310_resume,
},
};
/*
* This is a variant of the of_l2c310_data with .sync set to
* NULL. Outer sync operations are not needed when the system is I/O
* coherent, and potentially harmful in certain situations (PCIe/PL310
* deadlock on Armada 375/38x due to hardware I/O coherency). The
* other operations are kept because they are infrequent (therefore do
* not cause the deadlock in practice) and needed for secondary CPU
* boot and other power management activities.
*/
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
.type = "L2C-310 Coherent",
.way_size_0 = SZ_8K,
.num_lock = 8,
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.resume = l2c310_resume,
},
};
/*
* Note that the end addresses passed to Linux primitives are
* noninclusive, while the hardware cache range operations use
* inclusive start and end addresses.
*/
static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
/*
* Limit the number of cache lines processed at once,
* since cache range operations stall the CPU pipeline
* until completion.
*/
if (end > start + AURORA_MAX_RANGE_SIZE)
end = start + AURORA_MAX_RANGE_SIZE;
/*
* Cache range operations can't straddle a page boundary.
*/
if (end > PAGE_ALIGN(start+1))
end = PAGE_ALIGN(start+1);
return end;
}
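/*
 * For example (hypothetical addresses, 4K pages, and assuming
 * AURORA_MAX_RANGE_SIZE >= 0x100): aurora_range_end(0x1f00, 0x3000)
 * returns 0x2000, so aurora_pa_range() below issues one hardware
 * operation per page-bounded chunk.
 */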
static void aurora_pa_range(unsigned long start, unsigned long end,
unsigned long offset)
{
void __iomem *base = l2x0_base;
unsigned long range_end;
unsigned long flags;
/*
 * align the start address down and the end address up to the cache line size
*/
start &= ~(CACHE_LINE_SIZE - 1);
end = ALIGN(end, CACHE_LINE_SIZE);
/*
* perform operation on all full cache lines between 'start' and 'end'
*/
while (start < end) {
range_end = aurora_range_end(start, end);
raw_spin_lock_irqsave(&l2x0_lock, flags);
writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
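		/* the hardware end address is inclusive: write the last line, not one past it */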
writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
start = range_end;
}
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
/*
* If L2 is forced to WT, the L2 will always be clean and we
* don't need to do anything here.
*/
if (!l2_wt_override)
aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
if (l2_wt_override)
aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
else
aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}
static void aurora_flush_all(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
}
static void aurora_cache_sync(void)
{
writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}
static void aurora_disable(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
writel_relaxed(0, base + AURORA_SYNC_REG);
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void aurora_save(void __iomem *base)
{
l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
/*
* For Aurora cache in no outer mode, enable via the CP15 coprocessor
* broadcasting of cache commands to L2.
*/
static void __init aurora_enable_no_outer(void __iomem *base,
unsigned num_lock)
{
u32 u;
asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
u |= AURORA_CTRL_FW; /* Set the FW bit */
asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
isb();
l2c_enable(base, num_lock);
}
static void __init aurora_fixup(void __iomem *base, u32 cache_id,
struct outer_cache_fns *fns)
{
sync_reg_offset = AURORA_SYNC_REG;
}
static void __init aurora_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
u32 mask = AURORA_ACR_REPLACEMENT_MASK;
of_property_read_u32(np, "cache-id-part",
&cache_id_part_number_from_dt);
/* Determine and save the write policy */
l2_wt_override = of_property_read_bool(np, "wt-override");
if (l2_wt_override) {
val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
}
if (of_property_read_bool(np, "marvell,ecc-enable")) {
mask |= AURORA_ACR_ECC_EN;
val |= AURORA_ACR_ECC_EN;
}
if (of_property_read_bool(np, "arm,parity-enable")) {
mask |= AURORA_ACR_PARITY_EN;
val |= AURORA_ACR_PARITY_EN;
} else if (of_property_read_bool(np, "arm,parity-disable")) {
mask |= AURORA_ACR_PARITY_EN;
}
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
}
static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
.type = "Aurora",
.way_size_0 = SZ_4K,
.num_lock = 4,
.of_parse = aurora_of_parse,
.enable = l2c_enable,
.fixup = aurora_fixup,
.save = aurora_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = aurora_inv_range,
.clean_range = aurora_clean_range,
.flush_range = aurora_flush_range,
.flush_all = aurora_flush_all,
.disable = aurora_disable,
.sync = aurora_cache_sync,
.resume = l2c_resume,
},
};
static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
.type = "Aurora",
.way_size_0 = SZ_4K,
.num_lock = 4,
.of_parse = aurora_of_parse,
.enable = aurora_enable_no_outer,
.fixup = aurora_fixup,
.save = aurora_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.resume = l2c_resume,
},
};
/*
* For certain Broadcom SoCs, depending on the address range, different offsets
* need to be added to the address before passing it to L2 for
* invalidation/clean/flush
*
* Section Address Range Offset EMI
* 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC
* 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS
* 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC
*
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range starting at 0xBFFF0000 and ending
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind that the cross-section case is
 * very rare.
*
* Note 2:
* We do not need to handle the case when the start address is in
* Section 1 and the end address is in Section 3, since it is not a valid use
* case
*
 * Note 3:
 * In practical terms, section 1 can no longer be used on rev A2, so the
 * code does not need to handle it at all.
*
*/
#define BCM_SYS_EMI_START_ADDR 0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL
#define BCM_SYS_EMI_OFFSET 0x40000000UL
#define BCM_VC_EMI_OFFSET 0x80000000UL
static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
return (addr >= BCM_SYS_EMI_START_ADDR) &&
(addr < BCM_VC_EMI_SEC3_START_ADDR);
}
static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
if (bcm_addr_is_sys_emi(addr))
return addr + BCM_SYS_EMI_OFFSET;
else
return addr + BCM_VC_EMI_OFFSET;
}
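/*
 * Worked example of the translation above, using the section table from
 * the comment block (these specific addresses are illustrative only):
 *
 *	bcm_l2_phys_addr(0x40000000) == 0x80000000	(section 2, +SYS offset)
 *	bcm_l2_phys_addr(0xBFFFFFFF) == 0xFFFFFFFF	(last section 2 address)
 *	bcm_l2_phys_addr(0xC0000000) == 0x40000000	(section 3, +VC offset,
 *							 wrapping modulo 2^32)
 */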
static void bcm_inv_range(unsigned long start, unsigned long end)
{
unsigned long new_start, new_end;
BUG_ON(start < BCM_SYS_EMI_START_ADDR);
if (unlikely(end <= start))
return;
new_start = bcm_l2_phys_addr(start);
new_end = bcm_l2_phys_addr(end);
	/* normal case, no section crossing between start and end */
if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
l2c210_inv_range(new_start, new_end);
return;
}
	/*
	 * They cross sections, so it can only be a crossing from section
	 * 2 to section 3.
	 */
l2c210_inv_range(new_start,
bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
new_end);
}
static void bcm_clean_range(unsigned long start, unsigned long end)
{
unsigned long new_start, new_end;
BUG_ON(start < BCM_SYS_EMI_START_ADDR);
if (unlikely(end <= start))
return;
new_start = bcm_l2_phys_addr(start);
new_end = bcm_l2_phys_addr(end);
	/* normal case, no section crossing between start and end */
if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
l2c210_clean_range(new_start, new_end);
return;
}
	/*
	 * They cross sections, so it can only be a crossing from section
	 * 2 to section 3.
	 */
l2c210_clean_range(new_start,
bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
new_end);
}
static void bcm_flush_range(unsigned long start, unsigned long end)
{
unsigned long new_start, new_end;
BUG_ON(start < BCM_SYS_EMI_START_ADDR);
if (unlikely(end <= start))
return;
if ((end - start) >= l2x0_size) {
outer_cache.flush_all();
return;
}
new_start = bcm_l2_phys_addr(start);
new_end = bcm_l2_phys_addr(end);
	/* normal case, no section crossing between start and end */
if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
l2c210_flush_range(new_start, new_end);
return;
}
	/*
	 * They cross sections, so it can only be a crossing from section
	 * 2 to section 3.
	 */
l2c210_flush_range(new_start,
bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
new_end);
}
/* Broadcom L2C-310 is based on ARM's r3p2 or later, and requires no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
.type = "BCM-L2C-310",
.way_size_0 = SZ_8K,
.num_lock = 8,
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = bcm_inv_range,
.clean_range = bcm_clean_range,
.flush_range = bcm_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.sync = l2c210_sync,
.resume = l2c310_resume,
},
};
static void __init tauros3_save(void __iomem *base)
{
l2c_save(base);
l2x0_saved_regs.aux2_ctrl =
readl_relaxed(base + TAUROS3_AUX2_CTRL);
l2x0_saved_regs.prefetch_ctrl =
readl_relaxed(base + L310_PREFETCH_CTRL);
}
static void tauros3_configure(void __iomem *base)
{
l2c_configure(base);
writel_relaxed(l2x0_saved_regs.aux2_ctrl,
base + TAUROS3_AUX2_CTRL);
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
base + L310_PREFETCH_CTRL);
}
static const struct l2c_init_data of_tauros3_data __initconst = {
.type = "Tauros3",
.way_size_0 = SZ_8K,
.num_lock = 8,
.enable = l2c_enable,
.save = tauros3_save,
.configure = tauros3_configure,
.unlock = l2c_unlock,
/* Tauros3 broadcasts L1 cache operations to L2 */
.outer_cache = {
.resume = l2c_resume,
},
};
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
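/*
 * For example, the first entry in the table below expands to:
 *	{ .compatible = "arm,l210-cache", .data = (void *)&of_l2c210_data }
 */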
static const struct of_device_id l2x0_ids[] __initconst = {
L2C_ID("arm,l210-cache", of_l2c210_data),
L2C_ID("arm,l220-cache", of_l2c220_data),
L2C_ID("arm,pl310-cache", of_l2c310_data),
L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
L2C_ID("marvell,tauros3-cache", of_tauros3_data),
/* Deprecated IDs */
L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
{}
};
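/*
 * For illustration, a device tree node matched by the table above might
 * look like the following (a hypothetical sketch; the exact unit address
 * and properties depend on the platform):
 *
 *	l2-cache@f8f02000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xf8f02000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */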
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
const struct l2c_init_data *data;
struct device_node *np;
struct resource res;
u32 cache_id, old_aux;
u32 cache_level = 2;
bool nosync = false;
np = of_find_matching_node(NULL, l2x0_ids);
if (!np)
return -ENODEV;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
l2x0_base = ioremap(res.start, resource_size(&res));
if (!l2x0_base)
return -ENOMEM;
l2x0_saved_regs.phy_base = res.start;
data = of_match_node(l2x0_ids, np)->data;
if (of_device_is_compatible(np, "arm,pl310-cache") &&
of_property_read_bool(np, "arm,io-coherent"))
data = &of_l2c310_coherent_data;
old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
if (old_aux != ((old_aux & aux_mask) | aux_val)) {
pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
old_aux, (old_aux & aux_mask) | aux_val);
} else if (aux_mask != ~0U && aux_val != 0) {
pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
}
/* All L2 caches are unified, so this property should be specified */
if (!of_property_read_bool(np, "cache-unified"))
pr_err("L2C: device tree omits to specify unified cache\n");
if (of_property_read_u32(np, "cache-level", &cache_level))
pr_err("L2C: device tree omits to specify cache-level\n");
if (cache_level != 2)
pr_err("L2C: device tree specifies invalid cache level\n");
nosync = of_property_read_bool(np, "arm,outer-sync-disable");
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
/* L2 configuration can only be changed if the cache is disabled */
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
if (data->of_parse)
data->of_parse(np, &aux_val, &aux_mask);
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif
| linux-master | arch/arm/mm/cache-l2x0.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/fault.c
*
* Copyright (C) 1995 Linus Torvalds
* Modifications for ARM processor (c) 1995-2004 Russell King
*/
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/kfence.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>
#include "fault.h"
#ifdef CONFIG_MMU
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
*/
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
if (!mm)
mm = &init_mm;
pgd = pgd_offset(mm, addr);
printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));
do {
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
break;
if (p4d_bad(*p4d)) {
pr_cont("(bad)");
break;
}
pud = pud_offset(p4d, addr);
if (PTRS_PER_PUD != 1)
pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
if (pud_none(*pud))
break;
if (pud_bad(*pud)) {
pr_cont("(bad)");
break;
}
pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
pr_cont("(bad)");
break;
}
/* We must not map this if we have highmem enabled */
if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
break;
pte = pte_offset_map(pmd, addr);
if (!pte)
break;
pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
pr_cont(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
pte_unmap(pte);
} while(0);
pr_cont("\n");
}
#else /* CONFIG_MMU */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */
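/*
 * FSR_WRITE reflects the WnR bit of the fault status, which is also set
 * by cache maintenance operations; those are flagged by FSR_CM, so a
 * fault only counts as a write when it is not cache maintenance.
 */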
static inline bool is_write_fault(unsigned int fsr)
{
return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}
static inline bool is_translation_fault(unsigned int fsr)
{
int fs = fsr_fs(fsr);
#ifdef CONFIG_ARM_LPAE
if ((fs & FS_MMU_NOLL_MASK) == FS_TRANS_NOLL)
return true;
#else
if (fs == FS_L1_TRANS || fs == FS_L2_TRANS)
return true;
#endif
return false;
}
static void die_kernel_fault(const char *msg, struct mm_struct *mm,
unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
bust_spinlocks(1);
pr_alert("8<--- cut here ---\n");
pr_alert("Unable to handle kernel %s at virtual address %08lx when %s\n",
msg, addr, fsr & FSR_LNX_PF ? "execute" :
fsr & FSR_WRITE ? "write" : "read");
show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
make_task_dead(SIGKILL);
}
/*
* Oops. The kernel tried to access some page that wasn't present.
*/
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
const char *msg;
/*
* Are we prepared to handle this kernel fault?
*/
if (fixup_exception(regs))
return;
/*
* No handler, we'll have to terminate things with extreme prejudice.
*/
if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else {
if (is_translation_fault(fsr) &&
kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
return;
msg = "paging request";
}
die_kernel_fault(msg, mm, addr, fsr, regs);
}
/*
* Something tried to access memory that isn't in our memory map..
* User mode accesses just cause a SIGSEGV
*/
static void
__do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
int code, struct pt_regs *regs)
{
struct task_struct *tsk = current;
if (addr > TASK_SIZE)
harden_branch_predictor();
#ifdef CONFIG_DEBUG_USER
if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
pr_err("8<--- cut here ---\n");
pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
tsk->comm, sig, addr, fsr);
show_pte(KERN_ERR, tsk->mm, addr);
show_regs(regs);
}
#endif
#ifndef CONFIG_KUSER_HELPERS
if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
printk_ratelimited(KERN_DEBUG
"%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
tsk->comm, addr);
#endif
tsk->thread.address = addr;
tsk->thread.error_code = fsr;
tsk->thread.trap_no = 14;
force_sig_fault(sig, code, (void __user *)addr);
}
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (user_mode(regs))
__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
else
__do_kernel_fault(mm, addr, fsr, regs);
}
#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
static inline bool is_permission_fault(unsigned int fsr)
{
int fs = fsr_fs(fsr);
#ifdef CONFIG_ARM_LPAE
if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
return true;
#else
if (fs == FS_L1_PERM || fs == FS_L2_PERM)
return true;
#endif
return false;
}
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int sig, code;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
unsigned long vm_flags = VM_ACCESS_FLAGS;
if (kprobe_page_fault(regs, fsr))
return 0;
/* Enable interrupts if they were enabled in the parent context. */
if (interrupts_enabled(regs))
local_irq_enable();
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (faulthandler_disabled() || !mm)
goto no_context;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (is_write_fault(fsr)) {
flags |= FAULT_FLAG_WRITE;
vm_flags = VM_WRITE;
}
if (fsr & FSR_LNX_PF) {
vm_flags = VM_EXEC;
if (is_permission_fault(fsr) && !user_mode(regs))
die_kernel_fault("execution of memory",
mm, addr, fsr, regs);
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
retry:
vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
fault = VM_FAULT_BADMAP;
goto bad_area;
}
/*
	 * OK, we have a good vm_area for this memory access; check that
	 * the permissions on the VMA allow the fault which occurred.
*/
if (!(vma->vm_flags & vm_flags))
fault = VM_FAULT_BADACCESS;
else
fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c.
	 */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return 0;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return 0;
if (!(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
mmap_read_unlock(mm);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
*/
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
bad_area:
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return to
* userspace (which will retry the fault, or kill us if we
* got oom-killed)
*/
pagefault_out_of_memory();
return 0;
}
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to
* successfully fix up this page fault.
*/
sig = SIGBUS;
code = BUS_ADRERR;
} else {
/*
* Something tried to access memory that
* isn't in our memory map..
*/
sig = SIGSEGV;
code = fault == VM_FAULT_BADACCESS ?
SEGV_ACCERR : SEGV_MAPERR;
}
__do_user_fault(addr, fsr, sig, code, regs);
return 0;
no_context:
__do_kernel_fault(mm, addr, fsr, regs);
return 0;
}
#else /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_MMU */
/*
* First Level Translation Fault Handler
*
* We enter here because the first level page table doesn't contain
* a valid entry for the address.
*
* If the address is in kernel space (>= TASK_SIZE), then we are
* probably faulting in the vmalloc() area.
*
 * If init_task's first level page tables contain the relevant
 * entry, we copy it to this task. If not, we send the process
 * a signal, fix up the exception, or oops the kernel.
*
* NOTE! We MUST NOT take any locks for this case. We may be in an
* interrupt or a critical region, and should only copy the information
* from the master page table, nothing more.
*/
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
unsigned int index;
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
if (user_mode(regs))
goto bad_area;
index = pgd_index(addr);
pgd = cpu_get_pgd() + index;
pgd_k = init_mm.pgd + index;
p4d = p4d_offset(pgd, addr);
p4d_k = p4d_offset(pgd_k, addr);
if (p4d_none(*p4d_k))
goto bad_area;
if (!p4d_present(*p4d))
set_p4d(p4d, *p4d_k);
pud = pud_offset(p4d, addr);
pud_k = pud_offset(p4d_k, addr);
if (pud_none(*pud_k))
goto bad_area;
if (!pud_present(*pud))
set_pud(pud, *pud_k);
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
#ifdef CONFIG_ARM_LPAE
/*
* Only one hardware entry per PMD with LPAE.
*/
index = 0;
#else
/*
	 * On ARM, one Linux PGD entry contains two hardware entries (see the
	 * page table layout in pgtable.h). We normally guarantee that we
	 * always fill both L1 entries, but create_mapping() doesn't follow
	 * that rule. It can create individual L1 entries, so here we have to
	 * run the pmd_none() check on the entry that really corresponds to
	 * the address, not on the first of the pair.
*/
index = (addr >> SECTION_SHIFT) & 1;
#endif
if (pmd_none(pmd_k[index]))
goto bad_area;
copy_pmd(pmd, pmd_k);
return 0;
bad_area:
do_bad_area(addr, fsr, regs);
return 0;
}
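/*
 * Illustrative flow, assuming a kernel fault on a freshly vmalloc()ed
 * address: vmalloc() populated only init_mm's page tables, so the first
 * access through this task's pgd faults here; the matching init_mm pmd
 * is found above and copy_pmd() propagates it, after which the access
 * is transparently retried.
 */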
#else /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_MMU */
/*
* Some section permission faults need to be handled gracefully.
* They can happen due to a __{get,put}_user during an oops.
*/
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
do_bad_area(addr, fsr, regs);
return 0;
}
#endif /* CONFIG_ARM_LPAE */
/*
* This abort handler always returns "fault".
*/
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 1;
}
struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
int code;
const char *name;
};
/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
{
if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
BUG();
fsr_info[nr].fn = fn;
fsr_info[nr].sig = sig;
fsr_info[nr].code = code;
fsr_info[nr].name = name;
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
pr_alert("8<--- cut here ---\n");
pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
show_pte(KERN_ALERT, current->mm, addr);
arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
fsr, 0);
}
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
{
if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
BUG();
ifsr_info[nr].fn = fn;
ifsr_info[nr].sig = sig;
ifsr_info[nr].code = code;
ifsr_info[nr].name = name;
}
asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
ifsr, 0);
}
/*
* Abort handler to be used only during first unmasking of asynchronous aborts
* on the boot CPU. This makes sure that the machine will not die if the
* firmware/bootloader left an imprecise abort pending for us to trip over.
*/
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
"first unmask, this is most likely caused by a "
"firmware/bootloader bug.\n", fsr);
return 0;
}
void __init early_abt_enable(void)
{
fsr_info[FSR_FS_AEA].fn = early_abort_handler;
local_abt_enable();
fsr_info[FSR_FS_AEA].fn = do_bad;
}
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
"I-cache maintenance fault");
}
if (cpu_architecture() >= CPU_ARCH_ARMv7) {
/*
* TODO: Access flag faults introduced in ARMv6K.
* Runtime check for 'K' extension is needed
*/
hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
"section access flag fault");
hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
"section access flag fault");
}
return 0;
}
arch_initcall(exceptions_init);
#endif
| linux-master | arch/arm/mm/fault.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/nommu.c
*
* ARM uCLinux supporting functions.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>
#include <asm/idmap.h>
#include "mm.h"
unsigned long vectors_base;
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif
#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
unsigned long setup_vectors_base(void)
{
unsigned long reg = get_cr();
set_cr(reg | CR_V);
return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}
/*
 * Security Extensions field, ID_PFR1 bits [7:4]; permitted values:
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
*/
static inline bool security_extensions_enabled(void)
{
/* Check CPUID Identification Scheme before ID_PFR1 read */
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
cpuid_feature_extract(CPUID_EXT_PFR1, 20);
return 0;
}
unsigned long setup_vectors_base(void)
{
unsigned long base = 0, reg = get_cr();
set_cr(reg & ~CR_V);
if (security_extensions_enabled()) {
if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
base = CONFIG_DRAM_BASE;
set_vbar(base);
} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
if (CONFIG_DRAM_BASE != 0)
pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
}
return base;
}
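/*
 * For example (config values hypothetical): with the Security Extensions
 * present, CONFIG_REMAP_VECTORS_TO_RAM=y and CONFIG_DRAM_BASE=0x70000000,
 * this clears SCTLR.V, writes 0x70000000 to VBAR and returns it, so
 * exceptions vector to the start of DRAM.
 */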
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */
void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
/*
	 * Reserve the exception vector page.  On some architectures the
	 * exception vectors sit at the start of DRAM; if that page were left
	 * to the allocator, alloc_page could return address 0, which looks
	 * like an error even though the allocation succeeded.
*/
memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
/*
* There is no dedicated vector page on V7-M. So nothing needs to be
* reserved here.
*/
#endif
/*
* In any case, always ensure address 0 is never used as many things
* get very confused if 0 is returned as a legitimate address.
*/
memblock_reserve(0, 1);
}
static void __init adjust_lowmem_bounds_mpu(void)
{
unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
switch (pmsa) {
case MMFR0_PMSAv7:
pmsav7_adjust_lowmem_bounds();
break;
case MMFR0_PMSAv8:
pmsav8_adjust_lowmem_bounds();
break;
default:
break;
}
}
static void __init mpu_setup(void)
{
unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
switch (pmsa) {
case MMFR0_PMSAv7:
pmsav7_setup();
break;
case MMFR0_PMSAv8:
pmsav8_setup();
break;
default:
break;
}
}
void __init adjust_lowmem_bounds(void)
{
phys_addr_t end;
adjust_lowmem_bounds_mpu();
end = memblock_end_of_DRAM();
high_memory = __va(end - 1) + 1;
memblock_set_current_limit(end);
}
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
early_trap_init((void *)vectors_base);
mpu_setup();
/* allocate the zero page. */
zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
flush_dcache_page(empty_zero_page);
}
/*
* We don't need to do anything here for nommu machines.
*/
void setup_mm_for_reboot(void)
{
}
void flush_dcache_folio(struct folio *folio)
{
__cpuc_flush_dcache_area(folio_address(folio), folio_size(folio));
}
EXPORT_SYMBOL(flush_dcache_folio);
void flush_dcache_page(struct page *page)
{
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC)
__cpuc_coherent_user_range(uaddr, uaddr + len);
}
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
size_t size, unsigned int mtype)
{
if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
return NULL;
return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
unsigned int mtype, void *caller)
{
return (void __iomem *)phys_addr;
}
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
#ifdef CONFIG_PCI
#include <asm/mach/map.h>
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (void *)phys_addr;
}
void iounmap(volatile void __iomem *io_addr)
{
}
EXPORT_SYMBOL(iounmap);
| linux-master | arch/arm/mm/nommu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 ARM Ltd.
* Written by Catalin Marinas <[email protected]>
*/
#include <linux/bug.h>
#include <linux/smp.h>
#include <asm/outercache.h>
void outer_disable(void)
{
WARN_ON(!irqs_disabled());
WARN_ON(num_online_cpus() > 1);
if (outer_cache.disable)
outer_cache.disable();
}
| linux-master | arch/arm/mm/l2c-common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/fault-armv.c
*
* Copyright (C) 1995 Linus Torvalds
* Modifications for ARM processor (c) 1995-2002 Russell King
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include "mm.h"
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
#if __LINUX_ARM_ARCH__ < 6
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
*
* Note that the pte lock held when calling update_mmu_cache must also
* guard the pte (somewhere else in the same mm) that we modify here.
* Therefore those configurations which might call adjust_pte (those
* without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
*/
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn, pte_t *ptep)
{
pte_t entry = *ptep;
int ret;
/*
* If this page is present, it's actually being shared.
*/
ret = pte_present(entry);
/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
*/
if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
flush_cache_page(vma, address, pfn);
outer_flush_range((pfn << PAGE_SHIFT),
(pfn << PAGE_SHIFT) + PAGE_SIZE);
pte_val(entry) &= ~L_PTE_MT_MASK;
pte_val(entry) |= shared_pte_mask;
set_pte_at(vma->vm_mm, address, ptep, entry);
flush_tlb_page(vma, address);
}
return ret;
}
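/*
 * Example of the situation handled above (addresses hypothetical): on a
 * VIVT cache, two VM_MAYSHARE mappings of the same file page at, say,
 * 0x40000000 and 0x50000000 can each hold their own dirty cache lines
 * for the same physical memory. Downgrading both PTEs to shared_pte_mask
 * (uncached but bufferable, by default) removes the alias.
 */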
#if USE_SPLIT_PTE_PTLOCKS
/*
* If we are using split PTE locks, then we need to take the page
* lock here. Otherwise we are using shared mm->page_table_lock
* which is already locked, thus cannot take it.
*/
static inline void do_pte_lock(spinlock_t *ptl)
{
/*
* Use nested version here to indicate that we are already
* holding one similar spinlock.
*/
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}
static inline void do_pte_unlock(spinlock_t *ptl)
{
spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
{
spinlock_t *ptl;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret;
pgd = pgd_offset(vma->vm_mm, address);
if (pgd_none_or_clear_bad(pgd))
return 0;
p4d = p4d_offset(pgd, address);
if (p4d_none_or_clear_bad(p4d))
return 0;
pud = pud_offset(p4d, address);
if (pud_none_or_clear_bad(pud))
return 0;
pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
return 0;
/*
* This is called while another page table is mapped, so we
* must use the nested version. This also means we need to
* open-code the spin-locking.
*/
pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);
if (!pte)
return 0;
do_pte_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
do_pte_unlock(ptl);
pte_unmap(pte);
return ret;
}
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, unsigned long pfn)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *mpnt;
unsigned long offset;
pgoff_t pgoff;
int aliases = 0;
pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
/*
* If we have any shared mappings that are in the same mm
* space, then we need to handle them specially to maintain
* cache coherency.
*/
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
/*
* If this VMA is not in our MM, we can ignore it.
* Note that we intentionally mask out the VMA
* that we are fixing up.
*/
if (mpnt->vm_mm != mm || mpnt == vma)
continue;
if (!(mpnt->vm_flags & VM_MAYSHARE))
continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
}
flush_dcache_mmap_unlock(mapping);
if (aliases)
do_adjust_pte(vma, addr, pfn, ptep);
}
/*
* Take care of architecture specific things when placing a new PTE into
* a page table, or changing an existing PTE. Basically, there are two
* things that we need to take care of:
*
* 1. If PG_dcache_clean is not set for the page, we need to ensure
 * that any cache entries for the kernel's virtual memory
* range are written back to the page.
* 2. If we have multiple shared mappings of the same space in
* an object, we need to deal with the cache aliasing issues.
*
* Note that the pte lock will be held.
*/
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, unsigned int nr)
{
unsigned long pfn = pte_pfn(*ptep);
struct address_space *mapping;
struct folio *folio;
if (!pfn_valid(pfn))
return;
/*
* The zero page is never written to, so never has any dirty
* cache lines, and therefore never needs to be flushed.
*/
if (is_zero_pfn(pfn))
return;
folio = page_folio(pfn_to_page(pfn));
mapping = folio_flush_mapping(folio);
if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
__flush_dcache_folio(mapping, folio);
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, ptep, pfn);
else if (vma->vm_flags & VM_EXEC)
__flush_icache_all();
}
}
#endif /* __LINUX_ARM_ARCH__ < 6 */
/*
 * Check whether the write buffer has physical address aliasing
 * issues. If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space: both test mappings point at the same physical page, so if
 * a write through one alias is not visible through the other, the
 * write buffer failed to keep the aliases coherent.
*/
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
register unsigned long zero = 0, one = 1, val;
local_irq_disable();
mb();
*p1 = one;
mb();
*p2 = zero;
mb();
val = *p1;
mb();
local_irq_enable();
return val != zero;
}
void __init check_writebuffer_bugs(void)
{
struct page *page;
const char *reason;
unsigned long v = 1;
pr_info("CPU: Testing write buffer coherency: ");
page = alloc_page(GFP_KERNEL);
if (page) {
unsigned long *p1, *p2;
pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);
p1 = vmap(&page, 1, VM_IOREMAP, prot);
p2 = vmap(&page, 1, VM_IOREMAP, prot);
if (p1 && p2) {
v = check_writebuffer(p1, p2);
reason = "enabling work-around";
} else {
reason = "unable to map memory\n";
}
vunmap(p1);
vunmap(p2);
put_page(page);
} else {
reason = "unable to grab page\n";
}
if (v) {
pr_cont("failed, %s\n", reason);
shared_pte_mask = L_PTE_MT_UNCACHED;
} else {
pr_cont("ok\n");
}
}
| linux-master | arch/arm/mm/fault-armv.c |
/*
* Based on linux/arch/arm/pmsa-v7.c
*
* ARM PMSAv8 supporting functions.
*/
#include <linux/memblock.h>
#include <linux/range.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/page.h>
#include <asm/sections.h>
#include "mm.h"
#ifndef CONFIG_CPU_V7M
#define PRSEL __ACCESS_CP15(c6, 0, c2, 1)
#define PRBAR __ACCESS_CP15(c6, 0, c3, 0)
#define PRLAR __ACCESS_CP15(c6, 0, c3, 1)
static inline u32 prlar_read(void)
{
return read_sysreg(PRLAR);
}
static inline u32 prbar_read(void)
{
return read_sysreg(PRBAR);
}
static inline void prsel_write(u32 v)
{
write_sysreg(v, PRSEL);
}
static inline void prbar_write(u32 v)
{
write_sysreg(v, PRBAR);
}
static inline void prlar_write(u32 v)
{
write_sysreg(v, PRLAR);
}
#else
static inline u32 prlar_read(void)
{
return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR);
}
static inline u32 prbar_read(void)
{
return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR);
}
static inline void prsel_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR);
}
static inline void prbar_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR);
}
static inline void prlar_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR);
}
#endif
static struct range __initdata io[MPU_MAX_REGIONS];
static struct range __initdata mem[MPU_MAX_REGIONS];
static unsigned int __initdata mpu_max_regions;
static __init bool is_region_fixed(int number)
{
switch (number) {
case PMSAv8_XIP_REGION:
case PMSAv8_KERNEL_REGION:
return true;
default:
return false;
}
}
void __init pmsav8_adjust_lowmem_bounds(void)
{
phys_addr_t mem_end;
phys_addr_t reg_start, reg_end;
bool first = true;
u64 i;
for_each_mem_range(i, ®_start, ®_end) {
if (first) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
			 * Initially only use memory contiguous from
			 * PHYS_OFFSET
			 */
if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
mem_end = reg_end;
first = false;
} else {
/*
			 * memblock auto-merges contiguous blocks; remove all
			 * later blocks in one go (we can't remove blocks one
			 * by one while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
&mem_end, ®_start);
memblock_remove(reg_start, 0 - reg_start);
break;
}
}
}
static int __init __mpu_max_regions(void)
{
static int max_regions;
u32 mpuir;
if (max_regions)
return max_regions;
mpuir = read_cpuid_mputype();
max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
return max_regions;
}
static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar)
{
if (number > mpu_max_regions
|| number >= MPU_MAX_REGIONS)
return -ENOENT;
dsb();
prsel_write(number);
isb();
prbar_write(bar);
prlar_write(lar);
mpu_rgn_info.rgns[number].prbar = bar;
mpu_rgn_info.rgns[number].prlar = lar;
mpu_rgn_info.used++;
return 0;
}
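/*
 * Sketch of the register encoding produced by the helpers below, for a
 * made-up RAM region spanning 0x80000000..0x80100000:
 *
 *	bar = 0x80000000 | PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
 *	lar = 0x800FFFC0 | PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
 *
 * i.e. PRBAR carries the base address plus access/shareability bits, and
 * PRLAR carries the 64-byte-aligned limit (end - 1), the attribute index
 * and the enable bit, exactly as pmsav8_setup_ram() computes them.
 */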
static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start,phys_addr_t end)
{
u32 bar, lar;
if (is_region_fixed(number))
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
return __pmsav8_setup_region(number, bar, lar);
}
static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_addr_t end)
{
u32 bar, lar;
if (is_region_fixed(number))
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
return __pmsav8_setup_region(number, bar, lar);
}
static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start,phys_addr_t end)
{
u32 bar, lar;
if (!is_region_fixed(number))
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
prsel_write(number);
isb();
if (prbar_read() != bar || prlar_read() != lar)
return -EINVAL;
/* Reserved region was set up early, we just need a record for secondaries */
mpu_rgn_info.rgns[number].prbar = bar;
mpu_rgn_info.rgns[number].prlar = lar;
mpu_rgn_info.used++;
return 0;
}
#ifndef CONFIG_CPU_V7M
static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start,phys_addr_t end)
{
u32 bar, lar;
if (number == PMSAv8_KERNEL_REGION)
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
return __pmsav8_setup_region(number, bar, lar);
}
#endif
void __init pmsav8_setup(void)
{
int i, err = 0;
int region = PMSAv8_KERNEL_REGION;
/* How many regions are supported ? */
mpu_max_regions = __mpu_max_regions();
/* RAM: single chunk of memory */
add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base,
memblock.memory.regions[0].base + memblock.memory.regions[0].size);
/* IO: cover full 4G range */
add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff);
/* RAM and IO: exclude kernel */
subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));
subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));
#ifdef CONFIG_XIP_KERNEL
/* RAM and IO: exclude xip */
subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
#endif
#ifndef CONFIG_CPU_V7M
/* RAM and IO: exclude vectors */
subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE);
subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE);
#endif
/* IO: exclude RAM */
for (i = 0; i < ARRAY_SIZE(mem); i++)
subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end);
/* Now program MPU */
#ifdef CONFIG_XIP_KERNEL
/* ROM */
err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
#endif
/* Kernel */
err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));
/* IO */
for (i = 0; i < ARRAY_SIZE(io); i++) {
if (!io[i].end)
continue;
err |= pmsav8_setup_io(region++, io[i].start, io[i].end);
}
/* RAM */
for (i = 0; i < ARRAY_SIZE(mem); i++) {
if (!mem[i].end)
continue;
err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end);
}
/* Vectors */
#ifndef CONFIG_CPU_V7M
err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
#endif
if (err)
pr_warn("MPU region initialization failure! %d", err);
else
pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n",
mpu_rgn_info.used, mpu_max_regions);
}
| linux-master | arch/arm/mm/pmsa-v8.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/mmu.c
*
* Copyright (C) 1995-2005 Russell King
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cachetype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tcm.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/procinfo.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/kasan_def.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include <asm/fixmap.h>
#include "fault.h"
#include "mm.h"
extern unsigned long __atags_pointer;
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
/*
* The pmd table for the upper-most set of pages.
*/
pmd_t *top_pmd;
pmdval_t user_pmd_table = _PAGE_USER_TABLE;
#define CPOLICY_UNCACHED 0
#define CPOLICY_BUFFERED 1
#define CPOLICY_WRITETHROUGH 2
#define CPOLICY_WRITEBACK 3
#define CPOLICY_WRITEALLOC 4
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
const char policy[16];
unsigned int cr_mask;
pmdval_t pmd;
pteval_t pte;
};
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
.pte = L_PTE_MT_UNCACHED,
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
.pte = L_PTE_MT_BUFFERABLE,
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
.pte = L_PTE_MT_WRITETHROUGH,
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
.pte = L_PTE_MT_WRITEBACK,
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
.pte = L_PTE_MT_WRITEALLOC,
}
};
#ifdef CONFIG_CPU_CP15
static unsigned long initial_pmd_value __initdata = 0;
/*
* Initialise the cache_policy variable with the initial state specified
* via the "pmd" value. This is used to ensure that on ARMv6 and later,
* the C code sets the page tables up with the same policy as the head
* assembly code, which avoids an illegal state where the TLBs can get
* confused. See comments in early_cachepolicy() for more information.
*/
void __init init_default_cache_policy(unsigned long pmd)
{
int i;
initial_pmd_value = pmd;
pmd &= PMD_SECT_CACHE_MASK;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
if (cache_policies[i].pmd == pmd) {
cachepolicy = i;
break;
}
if (i == ARRAY_SIZE(cache_policies))
pr_err("ERROR: could not find cache policy\n");
}
/*
* These are useful for identifying cache coherency problems by allowing
 * the cache, or the cache and write buffer, to be turned off. (Note: the
 * write buffer should not be enabled while the cache is off.)
*/
static int __init early_cachepolicy(char *p)
{
int i, selected = -1;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
int len = strlen(cache_policies[i].policy);
if (memcmp(p, cache_policies[i].policy, len) == 0) {
selected = i;
break;
}
}
if (selected == -1)
pr_err("ERROR: unknown or unsupported cache policy\n");
/*
* This restriction is partly to do with the way we boot; it is
* unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs). We cannot
	 * change these attributes once the initial assembly has set up the
* page tables.
*/
if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
cache_policies[cachepolicy].policy);
return 0;
}
if (selected != cachepolicy) {
unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
cachepolicy = selected;
flush_cache_all();
set_cr(cr);
}
return 0;
}
early_param("cachepolicy", early_cachepolicy);
static int __init early_nocache(char *__unused)
{
char *p = "buffered";
pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(p);
return 0;
}
early_param("nocache", early_nocache);
static int __init early_nowrite(char *__unused)
{
char *p = "uncached";
pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(p);
return 0;
}
early_param("nowb", early_nowrite);
#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
if (memcmp(p, "on", 2) == 0)
ecc_mask = PMD_PROTECTION;
else if (memcmp(p, "off", 3) == 0)
ecc_mask = 0;
return 0;
}
early_param("ecc", early_ecc);
#endif
#else /* ifdef CONFIG_CPU_CP15 */
static int __init early_cachepolicy(char *p)
{
pr_warn("cachepolicy kernel parameter not supported without cp15\n");
return 0;
}
early_param("cachepolicy", early_cachepolicy);
static int __init noalign_setup(char *__unused)
{
pr_warn("noalign kernel parameter not supported without cp15\n");
return 1;
}
__setup("noalign", noalign_setup);
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] __ro_after_init = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
},
[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
[MT_DEVICE_CACHED] = { /* ioremap_cache */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
.domain = DOMAIN_IO,
},
[MT_DEVICE_WC] = { /* ioremap_wc */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
[MT_UNCACHED] = {
.prot_pte = PROT_PTE_DEVICE,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
#ifndef CONFIG_ARM_LPAE
[MT_MINICLEAN] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
#endif
[MT_LOW_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_VECTORS,
},
[MT_HIGH_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_VECTORS,
},
[MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RW] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RO] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
#ifdef CONFIG_ARM_LPAE
.prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.prot_sect = PMD_TYPE_SECT,
#endif
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RWX_NONCACHED] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_MT_BUFFERABLE,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RW_DTCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RWX_ITCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RW_SO] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_MT_UNCACHED | L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
PMD_SECT_UNCACHED | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_DMA_READY] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
};
const struct mem_type *get_mem_type(unsigned int type)
{
return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
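/*
 * Illustrative use (hypothetical caller): a mapping helper can fetch the
 * protection bits for a memory type along these lines:
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE);
 *
 *	if (mt)
 *		pmdval = __pfn_to_phys(pfn) | mt->prot_sect;
 */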
static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
{
return &bm_pte[pte_index(addr)];
}
static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
{
return pte_offset_kernel(dir, addr);
}
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
return pmd_off_k(addr);
}
void __init early_fixmap_init(void)
{
pmd_t *pmd;
/*
* The early fixmap range spans multiple pmds, for which
* we are not prepared:
*/
BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
!= FIXADDR_TOP >> PMD_SHIFT);
pmd = fixmap_pmd(FIXADDR_TOP);
pmd_populate_kernel(&init_mm, pmd, bm_pte);
pte_offset_fixmap = pte_offset_early_fixmap;
}
/*
* To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
* As a result, this can only be called with preemption disabled, as under
* stop_machine().
*/
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long vaddr = __fix_to_virt(idx);
pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
/* Make sure fixmap region does not exceed available allocation. */
BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
BUG_ON(idx >= __end_of_fixed_addresses);
/* We support only device mappings before pgprot_kernel is set. */
if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
pgprot_val(prot) && pgprot_val(pgprot_kernel) == 0))
return;
if (pgprot_val(prot))
set_pte_at(NULL, vaddr, pte,
pfn_pte(phys >> PAGE_SHIFT, prot));
else
pte_clear(NULL, vaddr, pte);
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
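/*
 * Usage sketch: callers normally go through the set_fixmap()/clear_fixmap()
 * wrappers, e.g. to temporarily map a page of kernel text for patching:
 *
 *	set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));
 *	...
 *	clear_fixmap(FIX_TEXT_POKE0);
 */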
static pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = __PAGE_NONE,
[VM_READ] = __PAGE_READONLY,
[VM_WRITE] = __PAGE_COPY,
[VM_WRITE | VM_READ] = __PAGE_COPY,
[VM_EXEC] = __PAGE_READONLY_EXEC,
[VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
[VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC,
[VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC,
[VM_SHARED] = __PAGE_NONE,
[VM_SHARED | VM_READ] = __PAGE_READONLY,
[VM_SHARED | VM_WRITE] = __PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED,
[VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC,
[VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
[VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT
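/*
 * For example, a PROT_READ, MAP_SHARED mmap() indexes this table as
 * protection_map[VM_SHARED | VM_READ] == __PAGE_READONLY, which
 * build_mem_type_table() below ORs with user_pgprot.
 */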
/*
* Adjust the PMD section entries according to the CPU in use.
*/
static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
if (cachepolicy > CPOLICY_BUFFERED)
cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
if (cachepolicy > CPOLICY_WRITETHROUGH)
cachepolicy = CPOLICY_WRITETHROUGH;
#endif
}
if (cpu_arch < CPU_ARCH_ARMv5) {
if (cachepolicy >= CPOLICY_WRITEALLOC)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
if (is_smp()) {
if (cachepolicy != CPOLICY_WRITEALLOC) {
pr_warn("Forcing write-allocate cache policy for SMP\n");
cachepolicy = CPOLICY_WRITEALLOC;
}
if (!(initial_pmd_value & PMD_SECT_S)) {
pr_warn("Forcing shared mappings for SMP\n");
initial_pmd_value |= PMD_SECT_S;
}
}
/*
* Strip out features not present on earlier architectures.
* Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
* without extended page tables don't have the 'Shared' bit.
*/
if (cpu_arch < CPU_ARCH_ARMv5)
for (i = 0; i < ARRAY_SIZE(mem_types); i++)
mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
for (i = 0; i < ARRAY_SIZE(mem_types); i++)
mem_types[i].prot_sect &= ~PMD_SECT_S;
/*
	 * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
* "update-able on write" bit on ARM610). However, Xscale and
* Xscale3 require this bit to be cleared.
*/
if (cpu_is_xscale_family()) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_sect &= ~PMD_BIT4;
mem_types[i].prot_l1 &= ~PMD_BIT4;
}
} else if (cpu_arch < CPU_ARCH_ARMv6) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
if (mem_types[i].prot_l1)
mem_types[i].prot_l1 |= PMD_BIT4;
if (mem_types[i].prot_sect)
mem_types[i].prot_sect |= PMD_BIT4;
}
}
/*
* Mark the device areas according to the CPU/architecture.
*/
if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
if (!cpu_is_xsc3()) {
/*
* Mark device regions on ARMv6+ as execute-never
* to prevent speculative instruction fetches.
*/
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
/* Also setup NX memory mapping */
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
}
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/*
* For ARMv7 with TEX remapping,
* - shared device is SXCB=1100
* - nonshared device is SXCB=0100
* - write combine device mem is SXCB=0001
* (Uncached Normal memory)
*/
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
} else if (cpu_is_xsc3()) {
/*
* For Xscale3,
* - shared device is TEXCB=00101
* - nonshared device is TEXCB=01000
* - write combine device mem is TEXCB=00100
* (Inner/Outer Uncacheable in xsc3 parlance)
*/
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
} else {
/*
* For ARMv6 and ARMv7 without TEX remapping,
* - shared device is TEXCB=00001
* - nonshared device is TEXCB=01000
* - write combine device mem is TEXCB=00100
* (Uncached Normal in ARMv6 parlance).
*/
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
}
} else {
/*
* On others, write combining is "Uncached/Buffered"
*/
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
}
/*
* Now deal with the memory-type mappings
*/
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
#ifndef CONFIG_ARM_LPAE
/*
* We don't use domains on ARMv6 (since this causes problems with
* v6/v7 kernels), so we must use a separate memory type for user
* r/o, kernel r/w to map the vectors page.
*/
if (cpu_arch == CPU_ARCH_ARMv6)
vecs_pgprot |= L_PTE_MT_VECTORS;
/*
	 * Check whether the CPU supports the PXN bit in the
	 * Short-descriptor translation table format descriptors.
*/
if (cpu_arch == CPU_ARCH_ARMv7 &&
(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
user_pmd_table |= PMD_PXNTABLE;
}
#endif
/*
* ARMv6 and above have extended page tables.
*/
if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
/*
* Mark cache clean areas and XIP ROM read only
* from SVC mode and no access from userspace.
*/
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif
/*
* If the initial page tables were created with the S bit
* set, then we need to do the same here for the same
* reasons given in early_cachepolicy().
*/
if (initial_pmd_value & PMD_SECT_S) {
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
}
}
/*
* Non-cacheable Normal - intended for memory areas that must
* not cause dirty cache line writebacks when used
*/
if (cpu_arch >= CPU_ARCH_ARMv6) {
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/* Non-cacheable Normal is XCB = 001 */
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
PMD_SECT_BUFFERED;
} else {
/* For both ARMv6 and non-TEX-remapping ARMv7 */
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
PMD_SECT_TEX(1);
}
} else {
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
}
#ifdef CONFIG_ARM_LPAE
/*
* Do not generate access flag faults for the kernel mappings.
*/
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_pte |= PTE_EXT_AF;
if (mem_types[i].prot_sect)
mem_types[i].prot_sect |= PMD_SECT_AF;
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
/*
* Set PXN for user mappings
*/
user_pgprot |= PTE_EXT_PXN;
#endif
for (i = 0; i < 16; i++) {
pteval_t v = pgprot_val(protection_map[i]);
protection_map[i] = __pgprot(v | user_pgprot);
}
mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | kern_pgprot);
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
case PMD_SECT_WT:
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
break;
case PMD_SECT_WB:
case PMD_SECT_WBWA:
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
break;
}
pr_info("Memory policy: %sData cache %s\n",
ecc_mask ? "ECC enabled, " : "", cp->policy);
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
struct mem_type *t = &mem_types[i];
if (t->prot_l1)
t->prot_l1 |= PMD_DOMAIN(t->domain);
if (t->prot_sect)
t->prot_sect |= PMD_DOMAIN(t->domain);
}
}
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
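/*
 * Protection hook for /dev/mem style mappings: PFNs that are not
 * backed by valid RAM are mapped non-cached, O_SYNC requests get
 * write-combining, and everything else keeps the protections the
 * caller asked for.
 */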
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
if (!pfn_valid(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
{
void *ptr = memblock_alloc(sz, sz);
if (!ptr)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, sz, sz);
return ptr;
}
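/*
 * Once the core MM is up, page-table pages come from the page
 * allocator (with the PTE constructor run on them) rather than
 * from memblock.
 */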
static void *__init late_alloc(unsigned long sz)
{
void *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_HIGHMEM,
get_order(sz));
if (!ptdesc || !pagetable_pte_ctor(ptdesc))
BUG();
return ptdesc_to_virt(ptdesc);
}
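/*
 * Return the PTE entry for @addr, allocating and populating the PMD
 * with a fresh L2 table via @alloc if none is present yet.
 */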
static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
unsigned long prot,
void *(*alloc)(unsigned long sz))
{
if (pmd_none(*pmd)) {
pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
__pmd_populate(pmd, __pa(pte), prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
unsigned long prot)
{
return arm_pte_alloc(pmd, addr, prot, early_alloc);
}
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
const struct mem_type *type,
void *(*alloc)(unsigned long sz),
bool ng)
{
pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
do {
set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
ng ? PTE_EXT_NG : 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type, bool ng)
{
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
/*
In classic MMU format, puds and pmds are folded into
* the pgds. pmd_offset gives the PGD entry. PGDs refer to a
* group of L1 entries making up one logical pointer to
an L2 table (2MB), whereas PMDs refer to the individual
* L1 entries (1MB). Hence increment to get the correct
* offset for odd 1MB sections.
* (See arch/arm/include/asm/pgtable-2level.h)
*/
if (addr & SECTION_SIZE)
pmd++;
#endif
do {
*pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
phys += SECTION_SIZE;
} while (pmd++, addr += SECTION_SIZE, addr != end);
flush_pmd_entry(p);
}
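/*
 * Fill the PMD entries for [addr, end): use section mappings when
 * the address, the end and the physical base are all section
 * aligned, otherwise fall back to building an L2 page table.
 */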
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type,
void *(*alloc)(unsigned long sz), bool ng)
{
pmd_t *pmd = pmd_offset(pud, addr);
unsigned long next;
do {
/*
* With LPAE, we must loop over to map
* all the pmds for the given range.
*/
next = pmd_addr_end(addr, end);
/*
* Try a section mapping - addr, next and phys must all be
* aligned to a section boundary.
*/
if (type->prot_sect &&
((addr | next | phys) & ~SECTION_MASK) == 0) {
__map_init_section(pmd, addr, next, phys, type, ng);
} else {
alloc_init_pte(pmd, addr, next,
__phys_to_pfn(phys), type, alloc, ng);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
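/*
 * Walk the PUD range (a single entry on non-LPAE, where PUDs are
 * folded into the PGD) and hand each chunk down to alloc_init_pmd().
 */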
static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type,
void *(*alloc)(unsigned long sz), bool ng)
{
pud_t *pud = pud_offset(p4d, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type,
void *(*alloc)(unsigned long sz), bool ng)
{
p4d_t *p4d = p4d_offset(pgd, addr);
unsigned long next;
do {
next = p4d_addr_end(addr, end);
alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
phys += next - addr;
} while (p4d++, addr = next, addr != end);
}
#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct mm_struct *mm,
struct map_desc *md,
const struct mem_type *type,
bool ng)
{
unsigned long addr, length, end;
phys_addr_t phys;
pgd_t *pgd;
addr = md->virtual;
phys = __pfn_to_phys(md->pfn);
length = PAGE_ALIGN(md->length);
if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
/* N.B. ARMv6 supersections are only defined to work with domain 0.
* Since domain assignments can in fact be arbitrary, the
'domain == 0' check below is required to ensure that ARMv6
* supersections are only allocated for domain 0 regardless
* of the actual domain assignments in use.
*/
if (type->domain) {
pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
/*
* Shift bits [35:32] of address into bits [23:20] of PMD
* (See ARMv6 spec).
*/
phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
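	/*
	 * Illustrative example: a 36-bit physical address 0x2_0000_0000
	 * has pfn 0x200000 (with 4K pages), so bits [35:32] = 0x2 end up
	 * in bits [23:20] of the supersection descriptor.
	 */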
pgd = pgd_offset(mm, addr);
end = addr + length;
do {
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
pmd_t *pmd = pmd_offset(pud, addr);
int i;
for (i = 0; i < 16; i++)
*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
(ng ? PMD_SECT_nG : 0));
addr += SUPERSECTION_SIZE;
phys += SUPERSECTION_SIZE;
pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
} while (addr != end);
}
#endif /* !CONFIG_ARM_LPAE */
static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
void *(*alloc)(unsigned long sz),
bool ng)
{
unsigned long addr, length, end;
phys_addr_t phys;
const struct mem_type *type;
pgd_t *pgd;
type = &mem_types[md->type];
#ifndef CONFIG_ARM_LPAE
/*
* Catch 36-bit addresses
*/
if (md->pfn >= 0x100000) {
create_36bit_mapping(mm, md, type, ng);
return;
}
#endif
addr = md->virtual & PAGE_MASK;
phys = __pfn_to_phys(md->pfn);
length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
(long long)__pfn_to_phys(md->pfn), addr);
return;
}
pgd = pgd_offset(mm, addr);
end = addr + length;
do {
unsigned long next = pgd_addr_end(addr, end);
alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);
phys += next - addr;
addr = next;
} while (pgd++, addr != end);
}
/*
* Create the page directory entries and any necessary
* page tables for the mapping specified by `md'. We
* are able to cope here with varying sizes and address
* offsets, and we take full advantage of sections and
* supersections.
*/
static void __init create_mapping(struct map_desc *md)
{
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if (md->type == MT_DEVICE &&
md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
}
__create_mapping(&init_mm, md, early_alloc, false);
}
void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
bool ng)
{
#ifdef CONFIG_ARM_LPAE
p4d_t *p4d;
pud_t *pud;
p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
if (WARN_ON(!p4d))
return;
pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))
return;
pmd_alloc(mm, pud, 0);
#endif
__create_mapping(mm, md, late_alloc, ng);
}
/*
* Create the architecture specific mappings
*/
void __init iotable_init(struct map_desc *io_desc, int nr)
{
struct map_desc *md;
struct vm_struct *vm;
struct static_vm *svm;
if (!nr)
return;
svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
if (!svm)
panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
__func__, sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
vm = &svm->vm;
vm->addr = (void *)(md->virtual & PAGE_MASK);
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
vm->phys_addr = __pfn_to_phys(md->pfn);
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
vm->flags |= VM_ARM_MTYPE(md->type);
vm->caller = iotable_init;
add_static_vm_early(svm++);
}
}
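/*
 * Illustrative sketch (not part of the original file): a machine's
 * ->map_io() callback would typically describe its static device
 * windows in an __initdata array and hand it to iotable_init().
 * The addresses below are hypothetical.
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x40000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init board_map_io(void)
 *	{
 *		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *	}
 */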
void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
void *caller)
{
struct vm_struct *vm;
struct static_vm *svm;
svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
if (!svm)
panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
__func__, sizeof(*svm), __alignof__(*svm));
vm = &svm->vm;
vm->addr = (void *)addr;
vm->size = size;
vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = caller;
add_static_vm_early(svm);
}
#ifndef CONFIG_ARM_LPAE
/*
* The Linux PMD is made of two consecutive section entries covering 2MB
* (see definition in include/asm/pgtable-2level.h). However a call to
* create_mapping() may optimize static mappings by using individual
* 1MB section mappings. This leaves the actual PMD potentially half
* initialized if the top or bottom section entry isn't used, leaving it
* open to problems if a subsequent ioremap() or vmalloc() tries to use
* the virtual space left free by that unused section entry.
*
* Let's avoid the issue by inserting dummy vm entries covering the unused
* PMD halves once the static mappings are in place.
*/
static void __init pmd_empty_section_gap(unsigned long addr)
{
vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}
static void __init fill_pmd_gaps(void)
{
struct static_vm *svm;
struct vm_struct *vm;
unsigned long addr, next = 0;
pmd_t *pmd;
list_for_each_entry(svm, &static_vmlist, list) {
vm = &svm->vm;
addr = (unsigned long)vm->addr;
if (addr < next)
continue;
/*
* Check if this vm starts on an odd section boundary.
* If so and the first section entry for this PMD is free
* then we block the corresponding virtual address.
*/
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
pmd = pmd_off_k(addr);
if (pmd_none(*pmd))
pmd_empty_section_gap(addr & PMD_MASK);
}
/*
* Then check if this vm ends on an odd section boundary.
* If so and the second section entry for this PMD is empty
* then we block the corresponding virtual address.
*/
addr += vm->size;
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
pmd = pmd_off_k(addr) + 1;
if (pmd_none(*pmd))
pmd_empty_section_gap(addr);
}
/* no need to look at any vm entry until we hit the next PMD */
next = (addr + PMD_SIZE - 1) & PMD_MASK;
}
}
#else
#define fill_pmd_gaps() do { } while (0)
#endif
#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
struct static_vm *svm;
svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
if (svm)
return;
vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LL
void __init debug_ll_io_init(void)
{
struct map_desc map;
debug_ll_addr(&map.pfn, &map.virtual);
if (!map.pfn || !map.virtual)
return;
map.pfn = __phys_to_pfn(map.pfn);
map.virtual &= PAGE_MASK;
map.length = PAGE_SIZE;
map.type = MT_DEVICE;
iotable_init(&map, 1);
}
#endif
static unsigned long __initdata vmalloc_size = 240 * SZ_1M;
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the vmalloc
* area - the default is 240MiB.
*/
static int __init early_vmalloc(char *arg)
{
unsigned long vmalloc_reserve = memparse(arg, NULL);
unsigned long vmalloc_max;
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
pr_warn("vmalloc area is too small, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);
if (vmalloc_reserve > vmalloc_max) {
vmalloc_reserve = vmalloc_max;
pr_warn("vmalloc area is too big, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
vmalloc_size = vmalloc_reserve;
return 0;
}
early_param("vmalloc", early_vmalloc);
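/*
 * For example, booting with "vmalloc=512M" on the kernel command line
 * grows the vmalloc area to 512 MiB at the expense of lowmem.
 */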
phys_addr_t arm_lowmem_limit __initdata = 0;
void __init adjust_lowmem_bounds(void)
{
phys_addr_t block_start, block_end, memblock_limit = 0;
u64 vmalloc_limit, i;
phys_addr_t lowmem_limit = 0;
/*
* Let's use our own (unoptimized) equivalent of __pa() that is
* not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
* The result is used as the upper bound on physical memory address
* and may itself be outside the valid range for which phys_addr_t
* and therefore __pa() is defined.
*/
vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET -
PAGE_OFFSET + PHYS_OFFSET;
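	/*
	 * Worked example (hypothetical values): with VMALLOC_END =
	 * 0xff800000, vmalloc_size = 240 MiB, VMALLOC_OFFSET = 8 MiB,
	 * PAGE_OFFSET = 0xc0000000 and PHYS_OFFSET = 0x80000000:
	 *
	 *	vmalloc_limit = 0xff800000 - 0x0f000000 - 0x00800000
	 *			- 0xc0000000 + 0x80000000 = 0xb0000000
	 *
	 * i.e. 768 MiB of RAM from 0x80000000 can be mapped as lowmem.
	 */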
/*
* The first usable region must be PMD aligned. Mark its start
as MEMBLOCK_NOMAP if it isn't.
*/
for_each_mem_range(i, &block_start, &block_end) {
if (!IS_ALIGNED(block_start, PMD_SIZE)) {
phys_addr_t len;
len = round_up(block_start, PMD_SIZE) - block_start;
memblock_mark_nomap(block_start, len);
}
break;
}
for_each_mem_range(i, &block_start, &block_end) {
if (block_start < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
* Compare as u64 to ensure vmalloc_limit does
* not get truncated. block_end should always
* fit in phys_addr_t so there should be no
* issue with assignment.
*/
lowmem_limit = min_t(u64,
vmalloc_limit,
block_end);
/*
* Find the first non-pmd-aligned page, and point
* memblock_limit at it. This relies on rounding the
* limit down to be pmd-aligned, which happens at the
* end of this function.
*
* With this algorithm, the start or end of almost any
* bank can be non-pmd-aligned. The only exception is
* that the start of the bank 0 must be section-
* aligned, since otherwise memory would need to be
* allocated when mapping the start of bank 0, which
* occurs before any free memory is mapped.
*/
if (!memblock_limit) {
if (!IS_ALIGNED(block_start, PMD_SIZE))
memblock_limit = block_start;
else if (!IS_ALIGNED(block_end, PMD_SIZE))
memblock_limit = lowmem_limit;
}
}
}
arm_lowmem_limit = lowmem_limit;
high_memory = __va(arm_lowmem_limit - 1) + 1;
if (!memblock_limit)
memblock_limit = arm_lowmem_limit;
/*
* Round the memblock limit down to a pmd size. This
* helps to ensure that we will allocate memory from the
* last full pmd, which should be mapped.
*/
memblock_limit = round_down(memblock_limit, PMD_SIZE);
if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
if (memblock_end_of_DRAM() > arm_lowmem_limit) {
phys_addr_t end = memblock_end_of_DRAM();
pr_notice("Ignoring RAM at %pa-%pa\n",
&memblock_limit, &end);
pr_notice("Consider using a HIGHMEM enabled kernel.\n");
memblock_remove(memblock_limit, end - memblock_limit);
}
}
memblock_set_current_limit(memblock_limit);
}
static __init void prepare_page_table(void)
{
unsigned long addr;
phys_addr_t end;
/*
* Clear out all the mappings below the kernel image.
*/
#ifdef CONFIG_KASAN
/*
KASan's shadow memory inserts itself between TASK_SIZE
* and MODULES_VADDR. Do not clear the KASan shadow memory mappings.
*/
for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
Skip over the KASan shadow area. KASAN_SHADOW_END is sometimes
equal to MODULES_VADDR, in which case this loop clears nothing. If we
are using a thumb-compiled kernel, there will be 8MB more
to clear, as the KASan shadow is always offset 16 MB below MODULES_VADDR.
*/
for (addr = KASAN_SHADOW_END; addr < MODULES_VADDR; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
#else
for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
#endif
#ifdef CONFIG_XIP_KERNEL
/* The XIP kernel is mapped in the module area -- skip over it */
addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
#endif
for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
* Find the end of the first block of lowmem.
*/
end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
if (end >= arm_lowmem_limit)
end = arm_lowmem_limit;
/*
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the vmalloc region.
*/
for (addr = __phys_to_virt(end);
addr < VMALLOC_START; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
}
#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#endif
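/*
 * E.g. with the classic 2-level format (PTRS_PER_PGD = 2048,
 * sizeof(pgd_t) = 8) this reserves the usual 16 KiB; with LPAE
 * (PTRS_PER_PGD = 4, PTRS_PER_PMD = 512, sizeof(pmd_t) = 8) it is
 * one 4 KiB PGD page plus 16 KiB of PMD tables.
 */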
/*
* Reserve the special regions of memory
*/
void __init arm_mm_memblock_reserve(void)
{
/*
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
#ifdef CONFIG_SA1111
/*
* Because of the SA1111 DMA bug, we want to preserve our
* precious DMA-able memory...
*/
memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}
/*
* Set up the device mappings. Since we clear out the page tables for all
* mappings above VMALLOC_START, except early fixmap, we might remove debug
device mappings. This means earlycon can be used to debug this function.
* Any other function or debugging method which may touch any device _will_
* crash the kernel.
*/
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
void *vectors;
/*
* Allocate the vector page early.
*/
vectors = early_alloc(PAGE_SIZE * 2);
early_trap_init(vectors);
/*
* Clear page table except top pmd used by early fixmaps
*/
for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
if (__atags_pointer) {
/* create a read-only mapping of the device tree */
map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
map.virtual = FDT_FIXED_BASE;
map.length = FDT_FIXED_SIZE;
map.type = MT_MEMORY_RO;
create_mapping(&map);
}
/*
* Map the kernel if it is XIP.
It is always first in the module area.
*/
#ifdef CONFIG_XIP_KERNEL
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
map.virtual = MODULES_VADDR;
map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
map.type = MT_ROM;
create_mapping(&map);
#endif
/*
* Map the cache flushing regions.
*/
#ifdef FLUSH_BASE
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
map.virtual = FLUSH_BASE;
map.length = SZ_1M;
map.type = MT_CACHECLEAN;
create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
map.virtual = FLUSH_BASE_MINICACHE;
map.length = SZ_1M;
map.type = MT_MINICLEAN;
create_mapping(&map);
#endif
/*
* Create a mapping for the machine vectors at the high-vectors
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
#ifdef CONFIG_KUSER_HELPERS
map.type = MT_HIGH_VECTORS;
#else
map.type = MT_LOW_VECTORS;
#endif
create_mapping(&map);
if (!vectors_high()) {
map.virtual = 0;
map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS;
create_mapping(&map);
}
/* Now create a kernel read-only mapping */
map.pfn += 1;
map.virtual = 0xffff0000 + PAGE_SIZE;
map.length = PAGE_SIZE;
map.type = MT_LOW_VECTORS;
create_mapping(&map);
/*
* Ask the machine support to map in the statically mapped devices.
*/
if (mdesc->map_io)
mdesc->map_io();
else
debug_ll_io_init();
fill_pmd_gaps();
/* Reserve fixed i/o space in VMALLOC region */
pci_reserve_io();
/*
* Finally flush the caches and tlb to ensure that we're in a
* consistent state wrt the writebuffer. This also ensures that
* any write-allocated cache lines in the vector page are written
* back. After this point, we can start to touch devices again.
*/
local_flush_tlb_all();
flush_cache_all();
/* Enable asynchronous aborts */
early_abt_enable();
}
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
_PAGE_KERNEL_TABLE);
}
static void __init map_lowmem(void)
{
phys_addr_t start, end;
u64 i;
/* Map all the lowmem memory banks. */
for_each_mem_range(i, &start, &end) {
struct map_desc map;
pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n",
(long long)start, (long long)end);
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
break;
/*
* If our kernel image is in the VMALLOC area we need to remove
* the kernel physical memory from lowmem since the kernel will
* be mapped separately.
*
* The kernel will typically be at the very start of lowmem,
* but any placement relative to memory ranges is possible.
*
* If the memblock contains the kernel, we have to chisel out
* the kernel memory from it and map each part separately. We
* get 6 different theoretical cases:
*
* +--------+ +--------+
* +-- start --+ +--------+ | Kernel | | Kernel |
* | | | Kernel | | case 2 | | case 5 |
* | | | case 1 | +--------+ | | +--------+
* | Memory | +--------+ | | | Kernel |
* | range | +--------+ | | | case 6 |
* | | | Kernel | +--------+ | | +--------+
* | | | case 3 | | Kernel | | |
* +-- end ----+ +--------+ | case 4 | | |
* +--------+ +--------+
*/
/* Case 5: kernel covers range, don't map anything, should be rare */
if ((start > kernel_sec_start) && (end < kernel_sec_end))
break;
/* Cases where the kernel is starting inside the range */
if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) {
/* Case 6: kernel is embedded in the range, we need two mappings */
if ((start < kernel_sec_start) && (end > kernel_sec_end)) {
/* Map memory below the kernel */
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = kernel_sec_start - start;
map.type = MT_MEMORY_RW;
create_mapping(&map);
/* Map memory above the kernel */
map.pfn = __phys_to_pfn(kernel_sec_end);
map.virtual = __phys_to_virt(kernel_sec_end);
map.length = end - kernel_sec_end;
map.type = MT_MEMORY_RW;
create_mapping(&map);
break;
}
/* Case 1: kernel and range start at the same address, should be common */
if (kernel_sec_start == start)
start = kernel_sec_end;
/* Case 3: kernel and range end at the same address, should be rare */
if (kernel_sec_end == end)
end = kernel_sec_start;
} else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) {
/* Case 2: kernel ends inside range, starts below it */
start = kernel_sec_end;
} else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) {
/* Case 4: kernel starts inside range, ends above it */
end = kernel_sec_start;
}
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RW;
create_mapping(&map);
}
}
static void __init map_kernel(void)
{
/*
* We use the well known kernel section start and end and split the area in the
* middle like this:
* . .
* | RW memory |
* +----------------+ kernel_x_start
* | Executable |
* | kernel memory |
* +----------------+ kernel_x_end / kernel_nx_start
* | Non-executable |
* | kernel memory |
* +----------------+ kernel_nx_end
* | RW memory |
* . .
*
* Notice that we are dealing with section sized mappings here so all of this
* will be bumped to the closest section boundary. This means that some of the
* non-executable part of the kernel memory is actually mapped as executable.
* This will only persist until we turn on proper memory management later on
* and we remap the whole kernel with page granularity.
*/
phys_addr_t kernel_x_start = kernel_sec_start;
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t kernel_nx_start = kernel_x_end;
phys_addr_t kernel_nx_end = kernel_sec_end;
struct map_desc map;
map.pfn = __phys_to_pfn(kernel_x_start);
map.virtual = __phys_to_virt(kernel_x_start);
map.length = kernel_x_end - kernel_x_start;
map.type = MT_MEMORY_RWX;
create_mapping(&map);
/* If the nx part is small it may end up covered by the tail of the RWX section */
if (kernel_x_end == kernel_nx_end)
return;
map.pfn = __phys_to_pfn(kernel_nx_start);
map.virtual = __phys_to_virt(kernel_nx_start);
map.length = kernel_nx_end - kernel_nx_start;
map.type = MT_MEMORY_RW;
create_mapping(&map);
}
#ifdef CONFIG_ARM_PV_FIXUP
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;
/*
* early_paging_init() recreates boot time page table setup, allowing machines
* to switch over to a high (>4G) address space on LPAE systems
*/
static void __init early_paging_init(const struct machine_desc *mdesc)
{
pgtables_remap *lpae_pgtables_remap;
unsigned long pa_pgd;
unsigned int cr, ttbcr;
long long offset;
if (!mdesc->pv_fixup)
return;
offset = mdesc->pv_fixup();
if (offset == 0)
return;
/*
* Offset the kernel section physical offsets so that the kernel
* mapping will work out later on.
*/
kernel_sec_start += offset;
kernel_sec_end += offset;
/*
* Get the address of the remap function in the 1:1 identity
* mapping setup by the early page table assembly code. We
* must get this prior to the pv update. The following barrier
* ensures that this is complete before we fixup any P:V offsets.
*/
lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
pa_pgd = __pa(swapper_pg_dir);
barrier();
pr_info("Switching physical address space to 0x%08llx\n",
(u64)PHYS_OFFSET + offset);
/* Re-set the phys pfn offset, and the pv offset */
__pv_offset += offset;
__pv_phys_pfn_offset += PFN_DOWN(offset);
/* Run the patch stub to update the constants */
fixup_pv_table(&__pv_table_begin,
(&__pv_table_end - &__pv_table_begin) << 2);
/*
We are changing not only the virtual-to-physical mapping, but also
the physical addresses used to access memory. We need to flush
all levels of cache in the system with caching disabled to
ensure that all data is written back, and nothing is prefetched
into the caches. We also need to prevent the TLB walkers from
allocating into the caches. Note that this is ARMv7 LPAE
* specific.
*/
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
asm volatile("mcr p15, 0, %0, c2, c0, 2"
: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
flush_cache_all();
/*
* Fixup the page tables - this must be in the idmap region as
* we need to disable the MMU to do this safely, and hence it
* needs to be assembly. It's fairly simple, as we're using the
* temporary tables setup by the initial assembly code.
*/
lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */
asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
set_cr(cr);
}
#else
static void __init early_paging_init(const struct machine_desc *mdesc)
{
long long offset;
if (!mdesc->pv_fixup)
return;
offset = mdesc->pv_fixup();
if (offset == 0)
return;
pr_crit("Physical address space modification is only to support Keystone2.\n");
pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
pr_crit("feature. Your kernel may crash now, have a good day.\n");
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
#endif
static void __init early_fixmap_shutdown(void)
{
int i;
unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
pte_offset_fixmap = pte_offset_late_fixmap;
pmd_clear(fixmap_pmd(va));
local_flush_tlb_kernel_page(va);
for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
pte_t *pte;
struct map_desc map;
map.virtual = fix_to_virt(i);
pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
/* Only i/o device mappings are supported ATM */
if (pte_none(*pte) ||
(pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
continue;
map.pfn = pte_pfn(*pte);
map.type = MT_DEVICE;
map.length = PAGE_SIZE;
create_mapping(&map);
}
}
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
kernel_sec_start, kernel_sec_end);
prepare_page_table();
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit);
/*
* After this point early_alloc(), i.e. the memblock allocator, can
* be used
*/
map_kernel();
dma_contiguous_remap();
early_fixmap_shutdown();
devicemaps_init(mdesc);
kmap_init();
tcm_init();
top_pmd = pmd_off_k(0xffff0000);
/* allocate the zero page. */
zero_page = early_alloc(PAGE_SIZE);
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
__flush_dcache_folio(NULL, page_folio(empty_zero_page));
}
void __init early_mm_init(const struct machine_desc *mdesc)
{
build_mem_type_table();
early_paging_init(mdesc);
}
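/*
 * ARM implementation of the generic set_ptes(): install @nr
 * consecutive PTEs starting at @ptep, advancing the target PFN one
 * page at a time. User-space mappings are made non-global and have
 * their I/D caches synchronised first.
 */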
void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval, unsigned int nr)
{
unsigned long ext = 0;
if (addr < TASK_SIZE && pte_valid_user(pteval)) {
if (!pte_special(pteval))
__sync_icache_dcache(pteval);
ext |= PTE_EXT_NG;
}
for (;;) {
set_pte_ext(ptep, pteval, ext);
if (--nr == 0)
break;
ptep++;
pte_val(pteval) += PAGE_SIZE;
}
}
| linux-master | arch/arm/mm/mmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/arm/mm/iomap.c
*
* Map IO port and PCI memory spaces so that {read,write}[bwl] can
* be used to access this memory.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/vga.h>
unsigned long vga_base;
EXPORT_SYMBOL(vga_base);
#ifdef __io
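/*
 * When the platform provides __io(), I/O port numbers translate
 * directly to fixed MMIO addresses, so mapping is trivial and
 * unmapping is a no-op.
 */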
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return __io(port);
}
EXPORT_SYMBOL(ioport_map);
void ioport_unmap(void __iomem *addr)
{
}
EXPORT_SYMBOL(ioport_unmap);
#endif
#ifdef CONFIG_PCI
unsigned long pcibios_min_io = 0x1000;
EXPORT_SYMBOL(pcibios_min_io);
unsigned long pcibios_min_mem = 0x01000000;
EXPORT_SYMBOL(pcibios_min_mem);
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
if ((unsigned long)addr >= VMALLOC_START &&
(unsigned long)addr < VMALLOC_END)
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
#endif
| linux-master | arch/arm/mm/iomap.c |
// SPDX-License-Identifier: GPL-2.0
static struct fsr_info fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, 0, "vector exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
static struct fsr_info ifsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
{ do_bad, SIGBUS, 0, "unknown 4" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "unknown 10" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "unknown 20" },
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, 0, "unknown 22" },
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "unknown 24" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
| linux-master | arch/arm/mm/fsr-2level.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Setup code for SAMA5
*
* Copyright (C) 2013 Atmel,
* 2013 Ludovic Desroches <[email protected]>
*/
#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/outercache.h>
#include <asm/system_misc.h>
#include "generic.h"
#include "sam_secure.h"
static void sama5_l2c310_write_sec(unsigned long val, unsigned reg)
{
/* OP-TEE configures the L2 cache and does not allow modifying it yet */
}
static void __init sama5_secure_cache_init(void)
{
sam_secure_init();
if (IS_ENABLED(CONFIG_OUTER_CACHE) && sam_linux_is_optee_available())
outer_cache.write_sec = sama5_l2c310_write_sec;
}
static void __init sama5_dt_device_init(void)
{
of_platform_default_populate(NULL, NULL, NULL);
sama5_pm_init();
}
static const char *const sama5_dt_board_compat[] __initconst = {
"atmel,sama5",
NULL
};
DT_MACHINE_START(sama5_dt, "Atmel SAMA5")
/* Maintainer: Atmel */
.init_machine = sama5_dt_device_init,
.dt_compat = sama5_dt_board_compat,
MACHINE_END
static const char *const sama5_alt_dt_board_compat[] __initconst = {
"atmel,sama5d4",
NULL
};
DT_MACHINE_START(sama5_alt_dt, "Atmel SAMA5")
/* Maintainer: Atmel */
.init_machine = sama5_dt_device_init,
.dt_compat = sama5_alt_dt_board_compat,
.l2c_aux_mask = ~0UL,
MACHINE_END
static void __init sama5d2_init(void)
{
of_platform_default_populate(NULL, NULL, NULL);
sama5d2_pm_init();
}
static const char *const sama5d2_compat[] __initconst = {
"atmel,sama5d2",
NULL
};
DT_MACHINE_START(sama5d2, "Atmel SAMA5")
/* Maintainer: Atmel */
.init_machine = sama5d2_init,
.init_early = sama5_secure_cache_init,
.dt_compat = sama5d2_compat,
.l2c_aux_mask = ~0UL,
MACHINE_END
| linux-master | arch/arm/mach-at91/sama5.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Setup code for AT91SAM9
*
* Copyright (C) 2011 Atmel,
* 2011 Nicolas Ferre <[email protected]>
*/
#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include <asm/system_misc.h>
#include "generic.h"
static void __init at91sam9_init(void)
{
of_platform_default_populate(NULL, NULL, NULL);
at91sam9_pm_init();
}
static const char *const at91_dt_board_compat[] __initconst = {
"atmel,at91sam9",
NULL
};
DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM9")
/* Maintainer: Atmel */
.init_machine = at91sam9_init,
.dt_compat = at91_dt_board_compat,
MACHINE_END
| linux-master | arch/arm/mach-at91/at91sam9.c |