python_code (string, 0–1.8M chars) · repo_name (string, 7 classes) · file_path (string, 5–99 chars)
// SPDX-License-Identifier: GPL-2.0
/*
 * IXP4xx Device Tree boot support
 */
#include <asm/mach/arch.h>

/*
 * We handle 4 different SoC families. These compatible strings are enough
 * to provide the core so that different boards can add their more detailed
 * specifics.
 */
static const char *ixp4xx_of_board_compat[] = {
	"intel,ixp42x",
	"intel,ixp43x",
	"intel,ixp45x",
	"intel,ixp46x",
	NULL,
};

DT_MACHINE_START(IXP4XX_DT, "IXP4xx (Device Tree)")
	.dt_compat = ixp4xx_of_board_compat,
MACHINE_END
linux-master
arch/arm/mach-ixp4xx/ixp4xx-of.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright Altera Corporation (C) 2016. All rights reserved.
 */
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "core.h"

/* A10 System Manager L2 ECC Control register */
#define A10_MPU_CTRL_L2_ECC_OFST		0x0
#define A10_MPU_CTRL_L2_ECC_EN			BIT(0)

/* A10 System Manager Global IRQ Mask register */
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST		0x98
#define A10_SYSMGR_ECC_INTMASK_CLR_L2		BIT(0)

/* A10 System Manager L2 ECC IRQ Clear register */
#define A10_SYSMGR_MPU_CLEAR_L2_ECC_OFST	0xA8
#define A10_SYSMGR_MPU_CLEAR_L2_ECC		(BIT(31) | BIT(15))

void socfpga_init_l2_ecc(void)
{
	struct device_node *np;
	void __iomem *mapped_l2_edac_addr;

	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-l2-ecc");
	if (!np) {
		pr_err("Unable to find socfpga-l2-ecc in dtb\n");
		return;
	}

	mapped_l2_edac_addr = of_iomap(np, 0);
	of_node_put(np);
	if (!mapped_l2_edac_addr) {
		pr_err("Unable to find L2 ECC mapping in dtb\n");
		return;
	}

	/* Enable ECC */
	writel(0x01, mapped_l2_edac_addr);
	iounmap(mapped_l2_edac_addr);
}

void socfpga_init_arria10_l2_ecc(void)
{
	struct device_node *np;
	void __iomem *mapped_l2_edac_addr;

	/* Find the L2 EDAC device tree node */
	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-a10-l2-ecc");
	if (!np) {
		pr_err("Unable to find socfpga-a10-l2-ecc in dtb\n");
		return;
	}

	mapped_l2_edac_addr = of_iomap(np, 0);
	of_node_put(np);
	if (!mapped_l2_edac_addr) {
		pr_err("Unable to find L2 ECC mapping in dtb\n");
		return;
	}

	if (!sys_manager_base_addr) {
		pr_err("System Manager not mapped for L2 ECC\n");
		goto exit;
	}
	/* Clear any pending IRQs */
	writel(A10_SYSMGR_MPU_CLEAR_L2_ECC, (sys_manager_base_addr +
	       A10_SYSMGR_MPU_CLEAR_L2_ECC_OFST));
	/* Enable ECC */
	writel(A10_SYSMGR_ECC_INTMASK_CLR_L2, sys_manager_base_addr +
	       A10_SYSMGR_ECC_INTMASK_CLR_OFST);
	writel(A10_MPU_CTRL_L2_ECC_EN, mapped_l2_edac_addr +
	       A10_MPU_CTRL_L2_ECC_OFST);

exit:
	iounmap(mapped_l2_edac_addr);
}
linux-master
arch/arm/mach-socfpga/l2_cache.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2012-2015 Altera Corporation
 */
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
#include <linux/reset/socfpga.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>

#include "core.h"

void __iomem *sys_manager_base_addr;
void __iomem *rst_manager_base_addr;
void __iomem *sdr_ctl_base_addr;
unsigned long socfpga_cpu1start_addr;

static void __init socfpga_sysmgr_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "altr,sys-mgr");

	if (of_property_read_u32(np, "cpu1-start-addr",
				 (u32 *) &socfpga_cpu1start_addr))
		pr_err("SMP: Need cpu1-start-addr in device tree.\n");

	/* Ensure that socfpga_cpu1start_addr is visible to other CPUs */
	smp_wmb();
	sync_cache_w(&socfpga_cpu1start_addr);

	sys_manager_base_addr = of_iomap(np, 0);

	np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr");
	rst_manager_base_addr = of_iomap(np, 0);

	np = of_find_compatible_node(NULL, NULL, "altr,sdr-ctl");
	sdr_ctl_base_addr = of_iomap(np, 0);
}

static void __init socfpga_init_irq(void)
{
	irqchip_init();
	socfpga_sysmgr_init();
	if (IS_ENABLED(CONFIG_EDAC_ALTERA_L2C))
		socfpga_init_l2_ecc();

	if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
		socfpga_init_ocram_ecc();

	socfpga_reset_init();
}

static void __init socfpga_arria10_init_irq(void)
{
	irqchip_init();
	socfpga_sysmgr_init();
	if (IS_ENABLED(CONFIG_EDAC_ALTERA_L2C))
		socfpga_init_arria10_l2_ecc();

	if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
		socfpga_init_arria10_ocram_ecc();

	socfpga_reset_init();
}

static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
{
	u32 temp;

	temp = readl(rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL);

	if (mode == REBOOT_WARM)
		temp |= RSTMGR_CTRL_SWWARMRSTREQ;
	else
		temp |= RSTMGR_CTRL_SWCOLDRSTREQ;
	writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL);
}

static void socfpga_arria10_restart(enum reboot_mode mode, const char *cmd)
{
	u32 temp;

	temp = readl(rst_manager_base_addr + SOCFPGA_A10_RSTMGR_CTRL);

	if (mode == REBOOT_WARM)
		temp |= RSTMGR_CTRL_SWWARMRSTREQ;
	else
		temp |= RSTMGR_CTRL_SWCOLDRSTREQ;
	writel(temp, rst_manager_base_addr + SOCFPGA_A10_RSTMGR_CTRL);
}

static const char *altera_dt_match[] = {
	"altr,socfpga",
	NULL
};

DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.init_irq	= socfpga_init_irq,
	.restart	= socfpga_cyclone5_restart,
	.dt_compat	= altera_dt_match,
MACHINE_END

static const char *altera_a10_dt_match[] = {
	"altr,socfpga-arria10",
	NULL
};

DT_MACHINE_START(SOCFPGA_A10, "Altera SOCFPGA Arria10")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.init_irq	= socfpga_arria10_init_irq,
	.restart	= socfpga_arria10_restart,
	.dt_compat	= altera_a10_dt_match,
MACHINE_END
linux-master
arch/arm/mach-socfpga/socfpga.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-socfpga/pm.c
 *
 * Copyright (C) 2014-2015 Altera Corporation. All rights reserved.
 *
 * with code from pm-imx6.c
 * Copyright 2011-2014 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/bitops.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <asm/suspend.h>
#include <asm/fncpy.h>

#include "core.h"

/* Pointer to function copied to ocram */
static u32 (*socfpga_sdram_self_refresh_in_ocram)(u32 sdr_base);

static int socfpga_setup_ocram_self_refresh(void)
{
	struct platform_device *pdev;
	phys_addr_t ocram_pbase;
	struct device_node *np;
	struct gen_pool *ocram_pool;
	unsigned long ocram_base;
	void __iomem *suspend_ocram_base;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!np) {
		pr_err("%s: Unable to find mmio-sram in dtb\n", __func__);
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_warn("%s: failed to find ocram device!\n", __func__);
		ret = -ENODEV;
		goto put_node;
	}

	ocram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!ocram_pool) {
		pr_warn("%s: ocram pool unavailable!\n", __func__);
		ret = -ENODEV;
		goto put_device;
	}

	ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz);
	if (!ocram_base) {
		pr_warn("%s: unable to alloc ocram!\n", __func__);
		ret = -ENOMEM;
		goto put_device;
	}

	ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);

	suspend_ocram_base = __arm_ioremap_exec(ocram_pbase,
						socfpga_sdram_self_refresh_sz,
						false);
	if (!suspend_ocram_base) {
		pr_warn("%s: __arm_ioremap_exec failed!\n", __func__);
		ret = -ENOMEM;
		goto put_device;
	}

	/* Copy the code that puts DDR in self refresh to ocram */
	socfpga_sdram_self_refresh_in_ocram =
		(void *)fncpy(suspend_ocram_base,
			      &socfpga_sdram_self_refresh,
			      socfpga_sdram_self_refresh_sz);

	WARN(!socfpga_sdram_self_refresh_in_ocram,
	     "could not copy function to ocram");
	if (!socfpga_sdram_self_refresh_in_ocram)
		ret = -EFAULT;

put_device:
	put_device(&pdev->dev);
put_node:
	of_node_put(np);

	return ret;
}

static int socfpga_pm_suspend(unsigned long arg)
{
	u32 ret;

	if (!sdr_ctl_base_addr)
		return -EFAULT;

	ret = socfpga_sdram_self_refresh_in_ocram((u32)sdr_ctl_base_addr);

	pr_debug("%s self-refresh loops request=%d exit=%d\n", __func__,
		 ret & 0xffff, (ret >> 16) & 0xffff);

	return 0;
}

static int socfpga_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
		outer_disable();
		cpu_suspend(0, socfpga_pm_suspend);
		outer_resume();
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static const struct platform_suspend_ops socfpga_pm_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= socfpga_pm_enter,
};

static int __init socfpga_pm_init(void)
{
	int ret;

	ret = socfpga_setup_ocram_self_refresh();
	if (ret)
		return ret;

	suspend_set_ops(&socfpga_pm_ops);
	pr_info("SoCFPGA initialized for DDR self-refresh during suspend.\n");

	return 0;
}
arch_initcall(socfpga_pm_init);
linux-master
arch/arm/mach-socfpga/pm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010-2011 Calxeda, Inc.
 * Copyright 2012 Pavel Machek <[email protected]>
 * Based on platsmp.c, Copyright (C) 2002 ARM Ltd.
 * Copyright (C) 2012 Altera Corporation
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>

#include "core.h"

static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int trampoline_size = secondary_trampoline_end - secondary_trampoline;

	if (socfpga_cpu1start_addr) {
		/* This will put CPU #1 into reset. */
		writel(RSTMGR_MPUMODRST_CPU1,
		       rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);

		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);

		writel(__pa_symbol(secondary_startup),
		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));

		flush_cache_all();
		smp_wmb();
		outer_clean_range(0, trampoline_size);

		/* This will release CPU #1 out of reset. */
		writel(0, rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
	}

	return 0;
}

static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int trampoline_size = secondary_trampoline_end - secondary_trampoline;

	if (socfpga_cpu1start_addr) {
		writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
		       SOCFPGA_A10_RSTMGR_MODMPURST);
		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);

		writel(__pa_symbol(secondary_startup),
		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));

		flush_cache_all();
		smp_wmb();
		outer_clean_range(0, trampoline_size);

		/* This will release CPU #1 out of reset. */
		writel(0, rst_manager_base_addr + SOCFPGA_A10_RSTMGR_MODMPURST);
	}

	return 0;
}

static void __init socfpga_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	void __iomem *socfpga_scu_base_addr;

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (!np) {
		pr_err("%s: missing scu\n", __func__);
		return;
	}

	socfpga_scu_base_addr = of_iomap(np, 0);
	if (!socfpga_scu_base_addr)
		return;
	scu_enable(socfpga_scu_base_addr);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void socfpga_cpu_die(unsigned int cpu)
{
	/* Do WFI. If we wake up early, go back into WFI */
	while (1)
		cpu_do_idle();
}

/*
 * We need a dummy function so that platform_can_cpu_hotplug() knows
 * we support CPU hotplug. However, the function does not need to do
 * anything, because CPUs going offline just do WFI. We could reset
 * the CPUs but it would increase power consumption.
 */
static int socfpga_cpu_kill(unsigned int cpu)
{
	return 1;
}
#endif

static const struct smp_operations socfpga_smp_ops __initconst = {
	.smp_prepare_cpus	= socfpga_smp_prepare_cpus,
	.smp_boot_secondary	= socfpga_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= socfpga_cpu_die,
	.cpu_kill		= socfpga_cpu_kill,
#endif
};

static const struct smp_operations socfpga_a10_smp_ops __initconst = {
	.smp_prepare_cpus	= socfpga_smp_prepare_cpus,
	.smp_boot_secondary	= socfpga_a10_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= socfpga_cpu_die,
	.cpu_kill		= socfpga_cpu_kill,
#endif
};

CPU_METHOD_OF_DECLARE(socfpga_smp, "altr,socfpga-smp", &socfpga_smp_ops);
CPU_METHOD_OF_DECLARE(socfpga_a10_smp, "altr,socfpga-a10-smp", &socfpga_a10_smp_ops);
linux-master
arch/arm/mach-socfpga/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright Altera Corporation (C) 2016. All rights reserved.
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "core.h"

#define ALTR_OCRAM_CLEAR_ECC	0x00000018
#define ALTR_OCRAM_ECC_EN	0x00000019

void socfpga_init_ocram_ecc(void)
{
	struct device_node *np;
	void __iomem *mapped_ocr_edac_addr;

	/* Find the OCRAM EDAC device tree node */
	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-ocram-ecc");
	if (!np) {
		pr_err("Unable to find socfpga-ocram-ecc\n");
		return;
	}

	mapped_ocr_edac_addr = of_iomap(np, 0);
	of_node_put(np);
	if (!mapped_ocr_edac_addr) {
		pr_err("Unable to map OCRAM ecc regs.\n");
		return;
	}

	/* Clear any pending OCRAM ECC interrupts, then enable ECC */
	writel(ALTR_OCRAM_CLEAR_ECC, mapped_ocr_edac_addr);
	writel(ALTR_OCRAM_ECC_EN, mapped_ocr_edac_addr);

	iounmap(mapped_ocr_edac_addr);
}

/* Arria10 OCRAM Section */
#define ALTR_A10_ECC_CTRL_OFST		0x08
#define ALTR_A10_OCRAM_ECC_EN_CTL	(BIT(1) | BIT(0))
#define ALTR_A10_ECC_INITA		BIT(16)

#define ALTR_A10_ECC_INITSTAT_OFST	0x0C
#define ALTR_A10_ECC_INITCOMPLETEA	BIT(0)
#define ALTR_A10_ECC_INITCOMPLETEB	BIT(8)

#define ALTR_A10_ECC_ERRINTEN_OFST	0x10
#define ALTR_A10_ECC_SERRINTEN		BIT(0)

#define ALTR_A10_ECC_INTSTAT_OFST	0x20
#define ALTR_A10_ECC_SERRPENA		BIT(0)
#define ALTR_A10_ECC_DERRPENA		BIT(8)
#define ALTR_A10_ECC_ERRPENA_MASK	(ALTR_A10_ECC_SERRPENA | \
					 ALTR_A10_ECC_DERRPENA)

/* ECC Manager Defines */
#define A10_SYSMGR_ECC_INTMASK_SET_OFST	0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST	0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM	BIT(1)

#define ALTR_A10_ECC_INIT_WATCHDOG_10US	10000

static inline void ecc_set_bits(u32 bit_mask, void __iomem *ioaddr)
{
	u32 value = readl(ioaddr);

	value |= bit_mask;
	writel(value, ioaddr);
}

static inline void ecc_clear_bits(u32 bit_mask, void __iomem *ioaddr)
{
	u32 value = readl(ioaddr);

	value &= ~bit_mask;
	writel(value, ioaddr);
}

static inline int ecc_test_bits(u32 bit_mask, void __iomem *ioaddr)
{
	u32 value = readl(ioaddr);

	return (value & bit_mask) ? 1 : 0;
}

/*
 * This function uses the memory initialization block in the Arria10 ECC
 * controller to initialize/clear the entire memory data and ECC data.
 */
static int altr_init_memory_port(void __iomem *ioaddr)
{
	int limit = ALTR_A10_ECC_INIT_WATCHDOG_10US;

	ecc_set_bits(ALTR_A10_ECC_INITA, (ioaddr + ALTR_A10_ECC_CTRL_OFST));
	while (limit--) {
		if (ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
				  (ioaddr + ALTR_A10_ECC_INITSTAT_OFST)))
			break;
		udelay(1);
	}
	if (limit < 0)
		return -EBUSY;

	/* Clear any pending ECC interrupts */
	writel(ALTR_A10_ECC_ERRPENA_MASK,
	       (ioaddr + ALTR_A10_ECC_INTSTAT_OFST));

	return 0;
}

void socfpga_init_arria10_ocram_ecc(void)
{
	struct device_node *np;
	int ret = 0;
	void __iomem *ecc_block_base;

	if (!sys_manager_base_addr) {
		pr_err("SOCFPGA: sys-mgr is not initialized\n");
		return;
	}

	/* Find the OCRAM EDAC device tree node */
	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-a10-ocram-ecc");
	if (!np) {
		pr_err("Unable to find socfpga-a10-ocram-ecc\n");
		return;
	}

	/* Map the ECC Block */
	ecc_block_base = of_iomap(np, 0);
	of_node_put(np);
	if (!ecc_block_base) {
		pr_err("Unable to map OCRAM ECC block\n");
		return;
	}

	/* Disable ECC */
	writel(ALTR_A10_OCRAM_ECC_EN_CTL,
	       sys_manager_base_addr + A10_SYSMGR_ECC_INTMASK_SET_OFST);
	ecc_clear_bits(ALTR_A10_ECC_SERRINTEN,
		       (ecc_block_base + ALTR_A10_ECC_ERRINTEN_OFST));
	ecc_clear_bits(ALTR_A10_OCRAM_ECC_EN_CTL,
		       (ecc_block_base + ALTR_A10_ECC_CTRL_OFST));

	/* Ensure all writes complete */
	wmb();

	/* Use HW initialization block to initialize memory for ECC */
	ret = altr_init_memory_port(ecc_block_base);
	if (ret) {
		pr_err("ECC: cannot init OCRAM PORTA memory\n");
		goto exit;
	}

	/* Enable ECC */
	ecc_set_bits(ALTR_A10_OCRAM_ECC_EN_CTL,
		     (ecc_block_base + ALTR_A10_ECC_CTRL_OFST));
	ecc_set_bits(ALTR_A10_ECC_SERRINTEN,
		     (ecc_block_base + ALTR_A10_ECC_ERRINTEN_OFST));
	writel(ALTR_A10_OCRAM_ECC_EN_CTL,
	       sys_manager_base_addr + A10_SYSMGR_ECC_INTMASK_CLR_OFST);

	/* Ensure all writes complete */
	wmb();
exit:
	iounmap(ecc_block_base);
}
linux-master
arch/arm/mach-socfpga/ocram.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Marvell
 *
 * Thomas Petazzoni <[email protected]>
 */

#define pr_fmt(fmt) "mvebu-cpureset: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/resource.h>

#include "common.h"

static void __iomem *cpu_reset_base;
static size_t cpu_reset_size;

#define CPU_RESET_OFFSET(cpu)	(cpu * 0x8)
#define CPU_RESET_ASSERT	BIT(0)

int mvebu_cpu_reset_deassert(int cpu)
{
	u32 reg;

	if (!cpu_reset_base)
		return -ENODEV;

	if (CPU_RESET_OFFSET(cpu) >= cpu_reset_size)
		return -EINVAL;

	reg = readl(cpu_reset_base + CPU_RESET_OFFSET(cpu));
	reg &= ~CPU_RESET_ASSERT;
	writel(reg, cpu_reset_base + CPU_RESET_OFFSET(cpu));

	return 0;
}

static int mvebu_cpu_reset_map(struct device_node *np, int res_idx)
{
	struct resource res;

	if (of_address_to_resource(np, res_idx, &res)) {
		pr_err("unable to get resource\n");
		return -ENOENT;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				np->full_name)) {
		pr_err("unable to request region\n");
		return -EBUSY;
	}

	cpu_reset_base = ioremap(res.start, resource_size(&res));
	if (!cpu_reset_base) {
		pr_err("unable to map registers\n");
		release_mem_region(res.start, resource_size(&res));
		return -ENOMEM;
	}

	cpu_reset_size = resource_size(&res);

	return 0;
}

static int __init mvebu_cpu_reset_init(void)
{
	struct device_node *np;
	int res_idx;
	int ret;

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-370-cpu-reset");
	if (np) {
		res_idx = 0;
	} else {
		/*
		 * This code is kept for backward compatibility with
		 * old Device Trees.
		 */
		np = of_find_compatible_node(NULL, NULL,
					     "marvell,armada-370-xp-pmsu");
		if (np) {
			pr_warn(FW_WARN "deprecated pmsu binding\n");
			res_idx = 1;
		}
	}

	/* No reset node found */
	if (!np)
		return -ENODEV;

	ret = mvebu_cpu_reset_map(np, res_idx);
	of_node_put(np);

	return ret;
}

early_initcall(mvebu_cpu_reset_init);
linux-master
arch/arm/mach-mvebu/cpu-reset.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Power Management Service Unit(PMSU) support for Armada 370/XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <[email protected]>
 * Gregory Clement <[email protected]>
 * Thomas Petazzoni <[email protected]>
 *
 * The Armada 370 and Armada XP SOCs have a power management service
 * unit which is responsible for powering down and waking up CPUs and
 * other SOC units
 */

#define pr_fmt(fmt) "mvebu-pmsu: " fmt

#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/mvebu-pmsu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include "common.h"
#include "pmsu.h"

#define PMSU_BASE_OFFSET	0x100
#define PMSU_REG_SIZE		0x1000

/* PMSU MP registers */
#define PMSU_CONTROL_AND_CONFIG(cpu)		((cpu * 0x100) + 0x104)
#define PMSU_CONTROL_AND_CONFIG_DFS_REQ		BIT(18)
#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ	BIT(16)
#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN	BIT(20)

#define PMSU_CPU_POWER_DOWN_CONTROL(cpu)	((cpu * 0x100) + 0x108)

#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP	BIT(0)

#define PMSU_STATUS_AND_MASK(cpu)		((cpu * 0x100) + 0x10c)
#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT	BIT(16)
#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT	BIT(17)
#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP		BIT(20)
#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP		BIT(21)
#define PMSU_STATUS_AND_MASK_DBG_WAKEUP		BIT(22)
#define PMSU_STATUS_AND_MASK_IRQ_MASK		BIT(24)
#define PMSU_STATUS_AND_MASK_FIQ_MASK		BIT(25)

#define PMSU_EVENT_STATUS_AND_MASK(cpu)		((cpu * 0x100) + 0x120)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE		BIT(1)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK	BIT(17)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu)	((cpu * 0x100) + 0x124)

/* PMSU fabric registers */
#define L2C_NFABRIC_PM_CTL			0x4
#define L2C_NFABRIC_PM_CTL_PWR_DOWN		BIT(20)

/* PMSU delay registers */
#define PMSU_POWERDOWN_DELAY			0xF04
#define PMSU_POWERDOWN_DELAY_PMU		BIT(1)
#define PMSU_POWERDOWN_DELAY_MASK		0xFFFE
#define PMSU_DFLT_ARMADA38X_DELAY		0x64

/* CA9 MPcore SoC Control registers */

#define MPCORE_RESET_CTL			0x64
#define MPCORE_RESET_CTL_L2			BIT(0)
#define MPCORE_RESET_CTL_DEBUG			BIT(16)

#define SRAM_PHYS_BASE	0xFFFF0000
#define BOOTROM_BASE	0xFFF00000
#define BOOTROM_SIZE	0x100000

#define ARMADA_370_CRYPT0_ENG_TARGET	0x9
#define ARMADA_370_CRYPT0_ENG_ATTR	0x1

extern void ll_disable_coherency(void);
extern void ll_enable_coherency(void);

extern void armada_370_xp_cpu_resume(void);
extern void armada_38x_cpu_resume(void);

static phys_addr_t pmsu_mp_phys_base;
static void __iomem *pmsu_mp_base;

static void *mvebu_cpu_resume;

static const struct of_device_id of_pmsu_table[] = {
	{ .compatible = "marvell,armada-370-pmsu", },
	{ .compatible = "marvell,armada-370-xp-pmsu", },
	{ .compatible = "marvell,armada-380-pmsu", },
	{ /* end of list */ },
};

void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
{
	writel(__pa_symbol(boot_addr), pmsu_mp_base +
		PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}

extern unsigned char mvebu_boot_wa_start[];
extern unsigned char mvebu_boot_wa_end[];

/*
 * This function sets up the boot address workaround needed for SMP
 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the
 * BootROM Mbus window, and instead remaps a crypto SRAM into which a
 * custom piece of code is copied to replace the problematic BootROM.
 */
int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
			     unsigned int crypto_eng_attribute,
			     phys_addr_t resume_addr_reg)
{
	void __iomem *sram_virt_base;
	u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;

	mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
	mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
				    SRAM_PHYS_BASE, SZ_64K);

	sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K);
	if (!sram_virt_base) {
		pr_err("Unable to map SRAM to setup the boot address WA\n");
		return -ENOMEM;
	}

	memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len);

	/*
	 * The last word of the code copied in SRAM must contain the
	 * physical base address of the PMSU register. We
	 * intentionally store this address in the native endianness
	 * of the system.
	 */
	__raw_writel((unsigned long)resume_addr_reg,
		     sram_virt_base + code_len - 4);

	iounmap(sram_virt_base);

	return 0;
}

static int __init mvebu_v7_pmsu_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret = 0;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;

	pr_info("Initializing Power Management Service Unit\n");

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("unable to get resource\n");
		ret = -ENOENT;
		goto out;
	}

	if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
		pr_warn(FW_WARN "deprecated pmsu binding\n");
		res.start = res.start - PMSU_BASE_OFFSET;
		res.end = res.start + PMSU_REG_SIZE - 1;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				np->full_name)) {
		pr_err("unable to request region\n");
		ret = -EBUSY;
		goto out;
	}

	pmsu_mp_phys_base = res.start;

	pmsu_mp_base = ioremap(res.start, resource_size(&res));
	if (!pmsu_mp_base) {
		pr_err("unable to map registers\n");
		release_mem_region(res.start, resource_size(&res));
		ret = -ENOMEM;
		goto out;
	}

out:
	of_node_put(np);
	return ret;
}

static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void)
{
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;

	/* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
	reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
	reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
	writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
}

enum pmsu_idle_prepare_flags {
	PMSU_PREPARE_NORMAL = 0,
	PMSU_PREPARE_DEEP_IDLE = BIT(0),
	PMSU_PREPARE_SNOOP_DISABLE = BIT(1),
};

/* No locking is needed because we only access per-CPU registers */
static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return -EINVAL;

	/*
	 * Adjust the PMSU configuration to wait for WFI signal, enable
	 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
	 * indication and mask IRQ and FIQ from CPU
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT    |
	       PMSU_STATUS_AND_MASK_IRQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_FIQ_WAKEUP       |
	       PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK         |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));

	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	/* ask HW to power down the L2 Cache if needed */
	if (flags & PMSU_PREPARE_DEEP_IDLE)
		reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;

	/* request power down */
	reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
		/* Disable snoop disable by HW - SW is taking care of it */
		reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
		reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
		writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
	}

	return 0;
}

int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
{
	unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
	int ret;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	ret = mvebu_v7_pmsu_idle_prepare(flags);
	if (ret)
		return ret;

	v7_exit_coherency_flush(all);

	ll_disable_coherency();

	dsb();

	wfi();

	/* If we are here, wfi failed. As processors run out of
	 * coherency for some time, tlbs might be stale, so flush them
	 */
	local_flush_tlb_all();

	ll_enable_coherency();

	/* Test the CR_C bit and set it if it was cleared */
	asm volatile(
	".arch	armv7-a\n\t"
	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
	"tst	r0, %0 \n\t"
	"orreq	r0, r0, #(1 << 2) \n\t"
	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
	"isb	"
	: : "Ir" (CR_C) : "r0");

	pr_debug("Failed to suspend the system\n");

	return 0;
}

static int armada_370_xp_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
}

int armada_38x_do_cpu_suspend(unsigned long deepidle)
{
	unsigned long flags = 0;

	if (deepidle)
		flags |= PMSU_PREPARE_DEEP_IDLE;

	mvebu_v7_pmsu_idle_prepare(flags);
	/*
	 * Already flushed cache, but do it again as the outer cache
	 * functions dirty the cache with spinlocks
	 */
	v7_exit_coherency_flush(louis);

	scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF);

	cpu_do_idle();

	return 1;
}

static int armada_38x_cpu_suspend(unsigned long deepidle)
{
	return cpu_suspend(false, armada_38x_do_cpu_suspend);
}

/* No locking is needed because we only access per-CPU registers */
void mvebu_v7_pmsu_idle_exit(void)
{
	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
	u32 reg;

	if (pmsu_mp_base == NULL)
		return;
	/* cancel ask HW to power down the L2 Cache if possible */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
	reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));

	/* cancel Enable wakeup events and mask interrupts */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
}

static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
		mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume);
	} else if (action == CPU_PM_EXIT) {
		mvebu_v7_pmsu_idle_exit();
	}

	return NOTIFY_OK;
}

static struct notifier_block mvebu_v7_cpu_pm_notifier = {
	.notifier_call = mvebu_v7_cpu_pm_notify,
};

static struct platform_device mvebu_v7_cpuidle_device;

static int broken_idle(struct device_node *np)
{
	if (of_property_read_bool(np, "broken-idle")) {
		pr_warn("CPU idle is currently broken: disabling\n");
		return 1;
	}

	return 0;
}

static __init int armada_370_cpuidle_init(void)
{
	struct device_node *np;
	phys_addr_t redirect_reg;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;

	if (broken_idle(np))
		goto end;

	/*
	 * On Armada 370, there is "a slow exit process from the deep
	 * idle state due to heavy L1/L2 cache cleanup operations
	 * performed by the BootROM software". To avoid this, we
	 * replace the restart code of the bootrom by a simple jump
	 * to the boot address. Then the code located at this boot
	 * address will take care of the initialization.
	 */
	redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0);
	mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET,
				 ARMADA_370_CRYPT0_ENG_ATTR,
				 redirect_reg);

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-370";

end:
	of_node_put(np);
	return 0;
}

static __init int armada_38x_cpuidle_init(void)
{
	struct device_node *np;
	void __iomem *mpsoc_base;
	u32 reg;

	pr_warn("CPU idle is currently broken on Armada 38x: disabling\n");
	return 0;

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-coherency-fabric");
	if (!np)
		return -ENODEV;

	if (broken_idle(np))
		goto end;

	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-380-mpcore-soc-ctrl");
	if (!np)
		return -ENODEV;
	mpsoc_base = of_iomap(np, 0);
	BUG_ON(!mpsoc_base);

	/* Set up reset mask when powering down the cpus */
	reg = readl(mpsoc_base + MPCORE_RESET_CTL);
	reg |= MPCORE_RESET_CTL_L2;
	reg |= MPCORE_RESET_CTL_DEBUG;
	writel(reg, mpsoc_base + MPCORE_RESET_CTL);
	iounmap(mpsoc_base);

	/* Set up delay */
	reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY);
	reg &= ~PMSU_POWERDOWN_DELAY_MASK;
	reg |= PMSU_DFLT_ARMADA38X_DELAY;
	reg |= PMSU_POWERDOWN_DELAY_PMU;
	writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY);

	mvebu_cpu_resume = armada_38x_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x";

end:
	of_node_put(np);
	return 0;
}

static __init int armada_xp_cpuidle_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
	if (!np)
		return -ENODEV;

	if (broken_idle(np))
		goto end;

	mvebu_cpu_resume = armada_370_xp_cpu_resume;
	mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
	mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp";

end:
	of_node_put(np);
	return 0;
}

static int __init mvebu_v7_cpu_pm_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, of_pmsu_table);
	if (!np)
		return 0;
	of_node_put(np);

	/*
	 * Currently the CPU idle support for Armada 38x is broken, as
	 * the CPU hotplug uses some of the CPU idle functions it is
	 * broken too, so let's disable it
	 */
	if (of_machine_is_compatible("marvell,armada380")) {
		cpu_hotplug_disable();
		pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n");
	}

	if (of_machine_is_compatible("marvell,armadaxp"))
		ret = armada_xp_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada370"))
		ret = armada_370_cpuidle_init();
	else if (of_machine_is_compatible("marvell,armada380"))
		ret = armada_38x_cpuidle_init();
	else
		return 0;

	if (ret)
		return ret;

	mvebu_v7_pmsu_enable_l2_powerdown_onidle();
	if (mvebu_v7_cpuidle_device.name)
		platform_device_register(&mvebu_v7_cpuidle_device);
	cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);

	return 0;
}

arch_initcall(mvebu_v7_cpu_pm_init);
early_initcall(mvebu_v7_pmsu_init);

static void mvebu_pmsu_dfs_request_local(void *data)
{
	u32 reg;
	u32 cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/* Prepare to enter idle */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
	       PMSU_STATUS_AND_MASK_IRQ_MASK     |
	       PMSU_STATUS_AND_MASK_FIQ_MASK;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	/* Request the DFS transition */
	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
	reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));

	/* The fact of entering idle will trigger the DFS transition */
	wfi();

	/*
	 * We're back from idle, the DFS transition has completed,
	 * clear the idle wait indication.
	 */
	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

	local_irq_restore(flags);
}

int mvebu_pmsu_dfs_request(int cpu)
{
	unsigned long timeout;
	int hwcpu = cpu_logical_map(cpu);
	u32 reg;

	/* Clear any previous DFS DONE event */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Mask the DFS done interrupt, since we are going to poll */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	/* Trigger the DFS on the appropriate CPU */
	smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
				 NULL, false);

	/* Poll until the DFS done event is generated */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
		if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
			break;
		udelay(10);
	}

	if (time_after(jiffies, timeout))
		return -ETIME;

	/* Restore the DFS mask to its original state */
	reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
	reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
	writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

	return 0;
}
linux-master
arch/arm/mach-mvebu/pmsu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
 * platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <[email protected]>
 * Gregory Clement <[email protected]>
 * Thomas Petazzoni <[email protected]>
 *
 * The Armada 370, 375, 38x and XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency
 */

#define pr_fmt(fmt) "mvebu-coherency: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/pci.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include <asm/dma-mapping.h>
#include "coherency.h"
#include "mvebu-soc-id.h"

unsigned long coherency_phys_base;
void __iomem *coherency_base;
static void __iomem *coherency_cpu_base;
static void __iomem *cpu_config_base;

/* Coherency fabric registers */
#define IO_SYNC_BARRIER_CTL_OFFSET	0x0

enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};

static const struct of_device_id of_coherency_table[] = {
	{.compatible = "marvell,coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
	{.compatible = "marvell,armada-375-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
	{.compatible = "marvell,armada-380-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
	{ /* end of list */ },
};

/* Functions defined in coherency_ll.S */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);

#define CPU_CONFIG_SHARED_L2 BIT(16)

/*
 * Disable the "Shared L2 Present" bit in CPU Configuration register
 * on Armada XP.
 *
 * The "Shared L2 Present" bit affects the "level of coherence" value
 * in the clidr CP15 register. Cache operation functions such as
 * "flush all" and "invalidate all" operate on all the cache levels
 * that are included in the defined level of coherence. When HW I/O
 * coherency is used, this bit causes unnecessary flushes of the L2
 * cache.
 */
static void armada_xp_clear_shared_l2(void)
{
	u32 reg;

	if (!cpu_config_base)
		return;

	reg = readl(cpu_config_base);
	reg &= ~CPU_CONFIG_SHARED_L2;
	writel(reg, cpu_config_base);
}

static int mvebu_hwcc_notifier(struct notifier_block *nb,
			       unsigned long event, void *__dev)
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	dev->dma_coherent = true;

	return NOTIFY_OK;
}

static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
	.notifier_call = mvebu_hwcc_notifier,
};

static int armada_xp_clear_l2_starting(unsigned int cpu)
{
	armada_xp_clear_shared_l2();
	return 0;
}

static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;
	struct device_node *cpu_config_np;

	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);

	cpu_config_np = of_find_compatible_node(NULL, NULL,
						"marvell,armada-xp-cpu-config");
	if (!cpu_config_np)
		goto exit;

	cpu_config_base = of_iomap(cpu_config_np, 0);
	if (!cpu_config_base) {
		of_node_put(cpu_config_np);
		goto exit;
	}

	of_node_put(cpu_config_np);

	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
				  "arm/mvebu/coherency:starting",
				  armada_xp_clear_l2_starting, NULL);
exit:
	set_cpu_coherent();
}

/*
 * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
 * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
 * needed for the HW I/O coherency mechanism to work properly without
 * deadlock.
 */
static void __iomem *
armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
			 unsigned int mtype, void *caller)
{
	mtype = MT_UNCACHED;
	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

static void __init armada_375_380_coherency_init(struct device_node *np)
{
	struct device_node *cache_dn;

	coherency_cpu_base = of_iomap(np, 0);
	arch_ioremap_caller = armada_wa_ioremap_caller;
	pci_ioremap_set_mem_type(MT_UNCACHED);

	/*
	 * We should switch the PL310 to I/O coherency mode only if
	 * I/O coherency is actually enabled.
	 */
	if (!coherency_available())
		return;

	/*
	 * Add the PL310 property "arm,io-coherent". This makes sure the
	 * outer sync operation is not used, which allows to
	 * workaround the system erratum that causes deadlocks when
	 * doing PCIe in an SMP situation on Armada 375 and Armada
	 * 38x.
	 */
	for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
		struct property *p;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
		of_add_property(cache_dn, p);
	}
}

static int coherency_type(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	int type;

	/*
	 * The coherency fabric is needed:
	 * - For coherency between processors on Armada XP, so only
	 *   when SMP is enabled.
	 * - For coherency between the processor and I/O devices, but
	 *   this coherency requires many pre-requisites (write
	 *   allocate cache policy, shareable pages, SMP bit set) that
	 *   are only meant in SMP situations.
	 *
	 * Note that this means that on Armada 370, there is currently
	 * no way to use hardware I/O coherency, because even when
	 * CONFIG_SMP is enabled, is_smp() returns false due to the
	 * Armada 370 being a single-core processor. To lift this
	 * limitation, we would have to find a way to make the cache
	 * policy set to write-allocate (on all Armada SoCs), and to
	 * set the shareable attribute in page tables (on all Armada
	 * SoCs except the Armada 370). Unfortunately, such decisions
	 * are taken very early in the kernel boot process, at a point
	 * where we don't know yet on which SoC we are running.
	 */
	if (!is_smp())
		return COHERENCY_FABRIC_TYPE_NONE;

	np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
	if (!np)
		return COHERENCY_FABRIC_TYPE_NONE;

	type = (int) match->data;

	of_node_put(np);

	return type;
}

int set_cpu_coherent(void)
{
	int type = coherency_type();

	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) {
		if (!coherency_base) {
			pr_warn("Can't make current CPU cache coherent.\n");
			pr_warn("Coherency fabric is not initialized\n");
			return 1;
		}

		armada_xp_clear_shared_l2();
		ll_add_cpu_to_smp_group();
		return ll_enable_coherency();
	}

	return 0;
}

int coherency_available(void)
{
	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
}

int __init coherency_init(void)
{
	int type = coherency_type();
	struct device_node *np;

	np = of_find_matching_node(NULL, of_coherency_table);

	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
		armada_370_coherency_init(np);
	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
		 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
		armada_375_380_coherency_init(np);

	of_node_put(np);

	return 0;
}

static int __init coherency_late_init(void)
{
	if (coherency_available())
		bus_register_notifier(&platform_bus_type,
				      &mvebu_hwcc_nb);
	return 0;
}

postcore_initcall(coherency_late_init);

#if IS_ENABLED(CONFIG_PCI)
static int __init coherency_pci_init(void)
{
	if (coherency_available())
		bus_register_notifier(&pci_bus_type,
				      &mvebu_hwcc_pci_nb);
	return 0;
}

arch_initcall(coherency_pci_init);
#endif
linux-master
arch/arm/mach-mvebu/coherency.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 (C), Jason Cooper <[email protected]>
 *
 * arch/arm/mach-mvebu/kirkwood.c
 *
 * Flattened Device Tree board initialization
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/hardware/cache-feroceon-l2.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "kirkwood.h"
#include "kirkwood-pm.h"
#include "common.h"

static struct resource kirkwood_cpufreq_resources[] = {
	[0] = {
		.start	= CPU_CONTROL_PHYS,
		.end	= CPU_CONTROL_PHYS + 3,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device kirkwood_cpufreq_device = {
	.name		= "kirkwood-cpufreq",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(kirkwood_cpufreq_resources),
	.resource	= kirkwood_cpufreq_resources,
};

static void __init kirkwood_cpufreq_init(void)
{
	platform_device_register(&kirkwood_cpufreq_device);
}

static struct resource kirkwood_cpuidle_resource[] = {
	{
		.flags	= IORESOURCE_MEM,
		.start	= DDR_OPERATION_BASE,
		.end	= DDR_OPERATION_BASE + 3,
	},
};

static struct platform_device kirkwood_cpuidle = {
	.name		= "kirkwood_cpuidle",
	.id		= -1,
	.resource	= kirkwood_cpuidle_resource,
	.num_resources	= 1,
};

static void __init kirkwood_cpuidle_init(void)
{
	platform_device_register(&kirkwood_cpuidle);
}

#define MV643XX_ETH_MAC_ADDR_LOW	0x0414
#define MV643XX_ETH_MAC_ADDR_HIGH	0x0418

static void __init kirkwood_dt_eth_fixup(void)
{
	struct device_node *np;

	/*
	 * The ethernet interfaces forget the MAC address assigned by u-boot
	 * if the clocks are turned off. Usually, u-boot on kirkwood boards
	 * has no DT support to properly set local-mac-address property.
	 * As a workaround, we get the MAC address from mv643xx_eth registers
	 * and update the port device node if no valid MAC address is set.
	 */
	for_each_compatible_node(np, NULL, "marvell,kirkwood-eth-port") {
		struct device_node *pnp = of_get_parent(np);
		struct clk *clk;
		struct property *pmac;
		u8 tmpmac[ETH_ALEN];
		void __iomem *io;
		u8 *macaddr;
		u32 reg;

		if (!pnp)
			continue;

		/* skip disabled nodes or nodes with valid MAC address */
		if (!of_device_is_available(pnp) ||
		    !of_get_mac_address(np, tmpmac))
			goto eth_fixup_skip;

		clk = of_clk_get(pnp, 0);
		if (IS_ERR(clk))
			goto eth_fixup_skip;

		io = of_iomap(pnp, 0);
		if (!io)
			goto eth_fixup_no_map;

		/* ensure port clock is not gated to not hang CPU */
		clk_prepare_enable(clk);

		/* store MAC address register contents in local-mac-address */
		pmac = kzalloc(sizeof(*pmac) + 6, GFP_KERNEL);
		if (!pmac)
			goto eth_fixup_no_mem;

		pmac->value = pmac + 1;
		pmac->length = 6;
		pmac->name = kstrdup("local-mac-address", GFP_KERNEL);
		if (!pmac->name) {
			kfree(pmac);
			goto eth_fixup_no_mem;
		}

		macaddr = pmac->value;
		reg = readl(io + MV643XX_ETH_MAC_ADDR_HIGH);
		macaddr[0] = (reg >> 24) & 0xff;
		macaddr[1] = (reg >> 16) & 0xff;
		macaddr[2] = (reg >> 8) & 0xff;
		macaddr[3] = reg & 0xff;

		reg = readl(io + MV643XX_ETH_MAC_ADDR_LOW);
		macaddr[4] = (reg >> 8) & 0xff;
		macaddr[5] = reg & 0xff;

		of_update_property(np, pmac);

eth_fixup_no_mem:
		iounmap(io);
		clk_disable_unprepare(clk);
eth_fixup_no_map:
		clk_put(clk);
eth_fixup_skip:
		of_node_put(pnp);
	}
}

/*
 * Disable propagation of mbus errors to the CPU local bus, as this
 * causes mbus errors (which can occur for example for PCI aborts) to
 * throw CPU aborts, which we're not set up to deal with.
 */
static void kirkwood_disable_mbus_error_propagation(void)
{
	void __iomem *cpu_config;

	cpu_config = ioremap(CPU_CONFIG_PHYS, 4);
	writel(readl(cpu_config) & ~CPU_CONFIG_ERROR_PROP, cpu_config);
}

static struct of_dev_auxdata auxdata[] __initdata = {
	OF_DEV_AUXDATA("marvell,kirkwood-audio", 0xf10a0000,
		       "mvebu-audio", NULL),
	{ /* sentinel */ }
};

static void __init kirkwood_dt_init(void)
{
	kirkwood_disable_mbus_error_propagation();

	BUG_ON(mvebu_mbus_dt_init(false));

#ifdef CONFIG_CACHE_FEROCEON_L2
	feroceon_of_init();
#endif
	kirkwood_cpufreq_init();
	kirkwood_cpuidle_init();

	kirkwood_pm_init();
	kirkwood_dt_eth_fixup();

	of_platform_default_populate(NULL, auxdata, NULL);
}

static const char * const kirkwood_dt_board_compat[] __initconst = {
	"marvell,kirkwood",
	NULL
};

DT_MACHINE_START(KIRKWOOD_DT, "Marvell Kirkwood (Flattened Device Tree)")
	/* Maintainer: Jason Cooper <[email protected]> */
	.init_machine	= kirkwood_dt_init,
	.restart	= mvebu_restart,
	.dt_compat	= kirkwood_dt_board_compat,
MACHINE_END
linux-master
arch/arm/mach-mvebu/kirkwood.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Symmetric Multi Processing (SMP) support for Marvell EBU Cortex-A9
 * based SOCs (Armada 375/38x).
 *
 * Copyright (C) 2014 Marvell
 *
 * Gregory CLEMENT <[email protected]>
 * Thomas Petazzoni <[email protected]>
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/mbus.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include "common.h"
#include "pmsu.h"

extern void mvebu_cortex_a9_secondary_startup(void);

static int mvebu_cortex_a9_boot_secondary(unsigned int cpu,
					  struct task_struct *idle)
{
	int ret, hw_cpu;

	pr_info("Booting CPU %d\n", cpu);

	/*
	 * Write the address of secondary startup into the system-wide
	 * flags register. The boot monitor waits until it receives a
	 * soft interrupt, and then the secondary CPU branches to this
	 * address.
	 */
	hw_cpu = cpu_logical_map(cpu);
	if (of_machine_is_compatible("marvell,armada375"))
		mvebu_system_controller_set_cpu_boot_addr(mvebu_cortex_a9_secondary_startup);
	else
		mvebu_pmsu_set_cpu_boot_addr(hw_cpu,
					     mvebu_cortex_a9_secondary_startup);

	smp_wmb();

	/*
	 * Doing this before deasserting the CPUs is needed to wake up CPUs
	 * in the offline state after using CPU hotplug.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	ret = mvebu_cpu_reset_deassert(hw_cpu);
	if (ret) {
		pr_err("Could not start the secondary CPU: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * When a CPU is brought back online, either through CPU hotplug, or
 * because of the boot of a kexec'ed kernel, the PMSU configuration
 * for this CPU might be in the deep idle state, preventing this CPU
 * from receiving interrupts. Here, we therefore take out the current
 * CPU from this state, which was entered by armada_38x_cpu_die()
 * below.
 */
static void armada_38x_secondary_init(unsigned int cpu)
{
	mvebu_v7_pmsu_idle_exit();
}

#ifdef CONFIG_HOTPLUG_CPU
static void armada_38x_cpu_die(unsigned int cpu)
{
	/*
	 * CPU hotplug is implemented by putting offline CPUs into the
	 * deep idle sleep state.
	 */
	armada_38x_do_cpu_suspend(true);
}

/*
 * We need a dummy function, so that platform_can_cpu_hotplug() knows
 * we support CPU hotplug. However, the function does not need to do
 * anything, because CPUs going offline can enter the deep idle state
 * by themselves, without any help from a still alive CPU.
 */
static int armada_38x_cpu_kill(unsigned int cpu)
{
	return 1;
}
#endif

static const struct smp_operations mvebu_cortex_a9_smp_ops __initconst = {
	.smp_boot_secondary	= mvebu_cortex_a9_boot_secondary,
};

static const struct smp_operations armada_38x_smp_ops __initconst = {
	.smp_boot_secondary	= mvebu_cortex_a9_boot_secondary,
	.smp_secondary_init	= armada_38x_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= armada_38x_cpu_die,
	.cpu_kill		= armada_38x_cpu_kill,
#endif
};

CPU_METHOD_OF_DECLARE(mvebu_armada_375_smp, "marvell,armada-375-smp",
		      &mvebu_cortex_a9_smp_ops);
CPU_METHOD_OF_DECLARE(mvebu_armada_380_smp, "marvell,armada-380-smp",
		      &armada_38x_smp_ops);
CPU_METHOD_OF_DECLARE(mvebu_armada_390_smp, "marvell,armada-390-smp",
		      &armada_38x_smp_ops);
linux-master
arch/arm/mach-mvebu/platsmp-a9.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * System controller support for Armada 370, 375 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <[email protected]>
 * Gregory CLEMENT <[email protected]>
 * Thomas Petazzoni <[email protected]>
 *
 * The Armada 370, 375 and Armada XP SoCs have a range of
 * miscellaneous registers, that do not belong to a particular device,
 * but rather provide system-level features. This basic
 * system-controller driver provides a device tree binding for those
 * registers, and implements utility functions offering various
 * features related to those registers.
 *
 * For now, the feature set is limited to restarting the platform by a
 * soft-reset, but it might be extended in the future.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include "common.h"
#include "mvebu-soc-id.h"
#include "pmsu.h"

#define ARMADA_375_CRYPT0_ENG_TARGET	41
#define ARMADA_375_CRYPT0_ENG_ATTR	1

static void __iomem *system_controller_base;
static phys_addr_t system_controller_phys_base;

struct mvebu_system_controller {
	u32 rstoutn_mask_offset;
	u32 system_soft_reset_offset;

	u32 rstoutn_mask_reset_out_en;
	u32 system_soft_reset;

	u32 resume_boot_addr;

	u32 dev_id;
	u32 rev_id;
};
static struct mvebu_system_controller *mvebu_sc;

static const struct mvebu_system_controller armada_370_xp_system_controller = {
	.rstoutn_mask_offset = 0x60,
	.system_soft_reset_offset = 0x64,
	.rstoutn_mask_reset_out_en = 0x1,
	.system_soft_reset = 0x1,
	.dev_id = 0x38,
	.rev_id = 0x3c,
};

static const struct mvebu_system_controller armada_375_system_controller = {
	.rstoutn_mask_offset = 0x54,
	.system_soft_reset_offset = 0x58,
	.rstoutn_mask_reset_out_en = 0x1,
	.system_soft_reset = 0x1,
	.resume_boot_addr = 0xd4,
	.dev_id = 0x38,
	.rev_id = 0x3c,
};

static const struct mvebu_system_controller orion_system_controller = {
	.rstoutn_mask_offset = 0x108,
	.system_soft_reset_offset = 0x10c,
	.rstoutn_mask_reset_out_en = 0x4,
	.system_soft_reset = 0x1,
};

static const struct of_device_id of_system_controller_table[] = {
	{
		.compatible = "marvell,orion-system-controller",
		.data = (void *) &orion_system_controller,
	}, {
		.compatible = "marvell,armada-370-xp-system-controller",
		.data = (void *) &armada_370_xp_system_controller,
	}, {
		.compatible = "marvell,armada-375-system-controller",
		.data = (void *) &armada_375_system_controller,
	},
	{ /* end of list */ },
};

void mvebu_restart(enum reboot_mode mode, const char *cmd)
{
	if (!system_controller_base) {
		pr_err("Cannot restart, system-controller not available: check the device tree\n");
	} else {
		/*
		 * Enable soft reset to assert RSTOUTn.
		 */
		writel(mvebu_sc->rstoutn_mask_reset_out_en,
			system_controller_base +
			mvebu_sc->rstoutn_mask_offset);
		/*
		 * Assert soft reset.
		 */
		writel(mvebu_sc->system_soft_reset,
			system_controller_base +
			mvebu_sc->system_soft_reset_offset);
	}

	while (1)
		;
}

int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
{
	if (of_machine_is_compatible("marvell,armada380") &&
	    system_controller_base) {
		*dev = readl(system_controller_base + mvebu_sc->dev_id) >> 16;
		*rev = (readl(system_controller_base + mvebu_sc->rev_id) >> 8)
			& 0xF;
		return 0;
	} else
		return -ENODEV;
}

#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
static void mvebu_armada375_smp_wa_init(void)
{
	u32 dev, rev;
	phys_addr_t resume_addr_reg;

	if (mvebu_get_soc_id(&dev, &rev) != 0)
		return;

	if (rev != ARMADA_375_Z1_REV)
		return;

	resume_addr_reg = system_controller_phys_base +
		mvebu_sc->resume_boot_addr;
	mvebu_setup_boot_addr_wa(ARMADA_375_CRYPT0_ENG_TARGET,
				 ARMADA_375_CRYPT0_ENG_ATTR,
				 resume_addr_reg);
}

void mvebu_system_controller_set_cpu_boot_addr(void *boot_addr)
{
	BUG_ON(system_controller_base == NULL);
	BUG_ON(mvebu_sc->resume_boot_addr == 0);

	if (of_machine_is_compatible("marvell,armada375"))
		mvebu_armada375_smp_wa_init();

	writel(__pa_symbol(boot_addr), system_controller_base +
	       mvebu_sc->resume_boot_addr);
}
#endif

static int __init mvebu_system_controller_init(void)
{
	const struct of_device_id *match;
	struct device_node *np;

	np = of_find_matching_node_and_match(NULL, of_system_controller_table,
					     &match);
	if (np) {
		struct resource res;

		system_controller_base = of_iomap(np, 0);
		of_address_to_resource(np, 0, &res);
		system_controller_phys_base = res.start;
		mvebu_sc = (struct mvebu_system_controller *)match->data;
		of_node_put(np);
	}

	return 0;
}

early_initcall(mvebu_system_controller_init);
linux-master
arch/arm/mach-mvebu/system-controller.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend/resume support. Currently supporting Armada XP only.
 *
 * Copyright (C) 2014 Marvell
 *
 * Thomas Petazzoni <[email protected]>
 */

#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/of_address.h>
#include <linux/suspend.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/suspend.h>

#include "coherency.h"
#include "common.h"
#include "pmsu.h"

#define SDRAM_CONFIG_OFFS			0x0
#define  SDRAM_CONFIG_SR_MODE_BIT		BIT(24)
#define SDRAM_OPERATION_OFFS			0x18
#define  SDRAM_OPERATION_SELF_REFRESH		0x7
#define SDRAM_DLB_EVICTION_OFFS			0x30c
#define  SDRAM_DLB_EVICTION_THRESHOLD_MASK	0xff

static void (*mvebu_board_pm_enter)(void __iomem *sdram_reg, u32 srcmd);
static void __iomem *sdram_ctrl;

static int mvebu_pm_powerdown(unsigned long data)
{
	u32 reg, srcmd;

	flush_cache_all();
	outer_flush_all();

	/*
	 * Issue a Data Synchronization Barrier instruction to ensure
	 * that all state saving has been completed.
	 */
	dsb();

	/* Flush the DLB and wait ~7 usec */
	reg = readl(sdram_ctrl + SDRAM_DLB_EVICTION_OFFS);
	reg &= ~SDRAM_DLB_EVICTION_THRESHOLD_MASK;
	writel(reg, sdram_ctrl + SDRAM_DLB_EVICTION_OFFS);

	udelay(7);

	/* Set DRAM in battery backup mode */
	reg = readl(sdram_ctrl + SDRAM_CONFIG_OFFS);
	reg &= ~SDRAM_CONFIG_SR_MODE_BIT;
	writel(reg, sdram_ctrl + SDRAM_CONFIG_OFFS);

	/* Prepare to go to self-refresh */

	srcmd = readl(sdram_ctrl + SDRAM_OPERATION_OFFS);
	srcmd &= ~0x1F;
	srcmd |= SDRAM_OPERATION_SELF_REFRESH;

	mvebu_board_pm_enter(sdram_ctrl + SDRAM_OPERATION_OFFS, srcmd);

	return 0;
}

#define BOOT_INFO_ADDR		0x3000
#define BOOT_MAGIC_WORD		0xdeadb002
#define BOOT_MAGIC_LIST_END	0xffffffff

/*
 * Those registers are accessed before switching the internal register
 * base, which is why we hardcode the 0xd0000000 base address, the one
 * used by the SoC out of reset.
 */
#define MBUS_WINDOW_12_CTRL		0xd00200b0
#define MBUS_INTERNAL_REG_ADDRESS	0xd0020080

#define SDRAM_WIN_BASE_REG(x)	(0x20180 + (0x8*x))
#define SDRAM_WIN_CTRL_REG(x)	(0x20184 + (0x8*x))

static phys_addr_t mvebu_internal_reg_base(void)
{
	struct device_node *np;
	__be32 in_addr[2];

	np = of_find_node_by_name(NULL, "internal-regs");
	BUG_ON(!np);

	/*
	 * Ask the DT what is the internal register address on this
	 * platform. In the mvebu-mbus DT binding, 0xf0010000
	 * corresponds to the internal register window.
	 */
	in_addr[0] = cpu_to_be32(0xf0010000);
	in_addr[1] = 0x0;

	return of_translate_address(np, in_addr);
}

static void mvebu_pm_store_armadaxp_bootinfo(u32 *store_addr)
{
	phys_addr_t resume_pc;

	resume_pc = __pa_symbol(armada_370_xp_cpu_resume);

	/*
	 * The bootloader expects the first two words to be a magic
	 * value (BOOT_MAGIC_WORD), followed by the address of the
	 * resume code to jump to. Then, it expects a sequence of
	 * (address, value) pairs, which can be used to restore the
	 * value of certain registers. This sequence must end with the
	 * BOOT_MAGIC_LIST_END magic value.
	 */

	writel(BOOT_MAGIC_WORD, store_addr++);
	writel(resume_pc, store_addr++);

	/*
	 * Some platforms remap their internal register base address
	 * to 0xf1000000. However, out of reset, window 12 starts at
	 * 0xf0000000 and ends at 0xf7ffffff, which would overlap with
	 * the internal registers. Therefore, disable window 12.
	 */
	writel(MBUS_WINDOW_12_CTRL, store_addr++);
	writel(0x0, store_addr++);

	/*
	 * Set the internal register base address to the value
	 * expected by Linux, as read from the Device Tree.
	 */
	writel(MBUS_INTERNAL_REG_ADDRESS, store_addr++);
	writel(mvebu_internal_reg_base(), store_addr++);

	/*
	 * Ask the mvebu-mbus driver to store the SDRAM window
	 * configuration, which has to be restored by the bootloader
	 * before re-entering the kernel on resume.
	 */
	store_addr += mvebu_mbus_save_cpu_target(store_addr);

	writel(BOOT_MAGIC_LIST_END, store_addr);
}

static int mvebu_pm_store_bootinfo(void)
{
	u32 *store_addr;

	store_addr = phys_to_virt(BOOT_INFO_ADDR);

	if (of_machine_is_compatible("marvell,armadaxp"))
		mvebu_pm_store_armadaxp_bootinfo(store_addr);
	else
		return -ENODEV;

	return 0;
}

static int mvebu_enter_suspend(void)
{
	int ret;

	ret = mvebu_pm_store_bootinfo();
	if (ret)
		return ret;

	cpu_pm_enter();

	cpu_suspend(0, mvebu_pm_powerdown);

	outer_resume();

	mvebu_v7_pmsu_idle_exit();

	set_cpu_coherent();

	cpu_pm_exit();

	return 0;
}

static int mvebu_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		cpu_do_idle();
		break;
	case PM_SUSPEND_MEM:
		pr_warn("Entering suspend to RAM. Only special wake-up sources will resume the system\n");
		return mvebu_enter_suspend();
	default:
		return -EINVAL;
	}
	return 0;
}

static int mvebu_pm_valid(suspend_state_t state)
{
	if (state == PM_SUSPEND_STANDBY)
		return 1;

	if (state == PM_SUSPEND_MEM && mvebu_board_pm_enter != NULL)
		return 1;

	return 0;
}

static const struct platform_suspend_ops mvebu_pm_ops = {
	.enter = mvebu_pm_enter,
	.valid = mvebu_pm_valid,
};

static int __init mvebu_pm_init(void)
{
	if (!of_machine_is_compatible("marvell,armadaxp") &&
	    !of_machine_is_compatible("marvell,armada370") &&
	    !of_machine_is_compatible("marvell,armada380") &&
	    !of_machine_is_compatible("marvell,armada390"))
		return -ENODEV;

	suspend_set_ops(&mvebu_pm_ops);

	return 0;
}

late_initcall(mvebu_pm_init);

int __init mvebu_pm_suspend_init(void (*board_pm_enter)(void __iomem *sdram_reg,
							 u32 srcmd))
{
	struct device_node *np;
	struct resource res;

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,armada-xp-sdram-controller");
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res)) {
		of_node_put(np);
		return -ENODEV;
	}

	if (!request_mem_region(res.start, resource_size(&res),
				np->full_name)) {
		of_node_put(np);
		return -EBUSY;
	}

	sdram_ctrl = ioremap(res.start, resource_size(&res));
	if (!sdram_ctrl) {
		release_mem_region(res.start, resource_size(&res));
		of_node_put(np);
		return -ENOMEM;
	}

	of_node_put(np);

	mvebu_board_pm_enter = board_pm_enter;

	return 0;
}
linux-master
arch/arm/mach-mvebu/pm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Power Management driver for Marvell Kirkwood SoCs
 *
 * Copyright (C) 2013 Ezequiel Garcia <[email protected]>
 * Copyright (C) 2010 Simon Guinot <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/io.h>
#include "kirkwood.h"
#include "kirkwood-pm.h"

static void __iomem *ddr_operation_base;
static void __iomem *memory_pm_ctrl;

static void kirkwood_low_power(void)
{
	u32 mem_pm_ctrl;

	mem_pm_ctrl = readl(memory_pm_ctrl);

	/* Set peripherals to low-power mode */
	writel_relaxed(~0, memory_pm_ctrl);

	/* Set DDR in self-refresh */
	writel_relaxed(0x7, ddr_operation_base);

	/*
	 * Set CPU in wait-for-interrupt state.
	 * This disables the CPU core clocks,
	 * the array clocks, and also the L2 controller.
	 */
	cpu_do_idle();

	writel_relaxed(mem_pm_ctrl, memory_pm_ctrl);
}

static int kirkwood_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		kirkwood_low_power();
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kirkwood_pm_valid_standby(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY;
}

static const struct platform_suspend_ops kirkwood_suspend_ops = {
	.enter = kirkwood_suspend_enter,
	.valid = kirkwood_pm_valid_standby,
};

void __init kirkwood_pm_init(void)
{
	ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
	memory_pm_ctrl = ioremap(MEMORY_PM_CTRL_PHYS, 4);

	suspend_set_ops(&kirkwood_suspend_ops);
}
linux-master
arch/arm/mach-mvebu/kirkwood-pm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ID and revision information for mvebu SoCs
 *
 * Copyright (C) 2014 Marvell
 *
 * Gregory CLEMENT <[email protected]>
 *
 * All the mvebu SoCs have information related to their variant and
 * revision that can be read from the PCI control register. This is
 * done before the PCI initialization to avoid any conflict. Once the
 * ID and revision are retrieved, the mapping is freed.
 */

#define pr_fmt(fmt) "mvebu-soc-id: " fmt

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include "common.h"
#include "mvebu-soc-id.h"

#define PCIE_DEV_ID_OFF		0x0
#define PCIE_DEV_REV_OFF	0x8

#define SOC_ID_MASK		0xFFFF0000
#define SOC_REV_MASK		0xFF

static u32 soc_dev_id;
static u32 soc_rev;
static bool is_id_valid;

static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,kirkwood-pcie" },
	{},
};

int mvebu_get_soc_id(u32 *dev, u32 *rev)
{
	if (is_id_valid) {
		*dev = soc_dev_id;
		*rev = soc_rev;
		return 0;
	} else
		return -ENODEV;
}

static int __init get_soc_id_by_pci(void)
{
	struct device_node *np;
	int ret = 0;
	void __iomem *pci_base;
	struct clk *clk;
	struct device_node *child;

	np = of_find_matching_node(NULL, mvebu_pcie_of_match_table);
	if (!np)
		return ret;

	/*
	 * ID and revision are available from any port, so we
	 * just pick the first one
	 */
	child = of_get_next_child(np, NULL);
	if (child == NULL) {
		pr_err("cannot get pci node\n");
		ret = -ENOMEM;
		goto clk_err;
	}

	clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(clk)) {
		pr_err("cannot get clock\n");
		ret = -ENOMEM;
		goto clk_err;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("cannot enable clock\n");
		goto clk_err;
	}

	pci_base = of_iomap(child, 0);
	if (pci_base == NULL) {
		pr_err("cannot map registers\n");
		ret = -ENOMEM;
		goto res_ioremap;
	}

	/* SoC ID */
	soc_dev_id = readl(pci_base + PCIE_DEV_ID_OFF) >> 16;

	/* SoC revision */
	soc_rev = readl(pci_base + PCIE_DEV_REV_OFF) & SOC_REV_MASK;

	is_id_valid = true;

	pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev);

	iounmap(pci_base);

res_ioremap:
	/*
	 * If the PCIe unit is actually enabled and we have PCI
	 * support in the kernel, we intentionally do not release the
	 * reference to the clock. We want to keep it running since
	 * the bootloader does some PCIe link configuration that the
	 * kernel is for now unable to do, and gating the clock would
	 * make us lose this precious configuration.
	 */
	if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
		clk_disable_unprepare(clk);
		clk_put(clk);
	}

clk_err:
	of_node_put(child);
	of_node_put(np);

	return ret;
}

static int __init mvebu_soc_id_init(void)
{
	/*
	 * First try to get the ID and the revision by the system
	 * register and use PCI registers only if it is not possible
	 */
	if (!mvebu_system_controller_get_soc_id(&soc_dev_id, &soc_rev)) {
		is_id_valid = true;
		pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev);
		return 0;
	}

	return get_soc_id_by_pci();
}
early_initcall(mvebu_soc_id_init);

static int __init mvebu_soc_device(void)
{
	struct soc_device_attribute *soc_dev_attr;
	struct soc_device *soc_dev;

	/* Also protects against running on non-mvebu systems */
	if (!is_id_valid)
		return 0;

	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
	if (!soc_dev_attr)
		return -ENOMEM;

	soc_dev_attr->family = kasprintf(GFP_KERNEL, "Marvell");
	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X", soc_rev);
	soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%X", soc_dev_id);

	soc_dev = soc_device_register(soc_dev_attr);
	if (IS_ERR(soc_dev)) {
		kfree(soc_dev_attr->family);
		kfree(soc_dev_attr->revision);
		kfree(soc_dev_attr->soc_id);
		kfree(soc_dev_attr);
	}

	return 0;
}
postcore_initcall(mvebu_soc_device);
linux-master
arch/arm/mach-mvebu/mvebu-soc-id.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Board-level suspend/resume support.
 *
 * Copyright (C) 2014-2015 Marvell
 *
 * Thomas Petazzoni <[email protected]>
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include "common.h"

#define ARMADA_PIC_NR_GPIOS 3

static void __iomem *gpio_ctrl;
static struct gpio_desc *pic_gpios[ARMADA_PIC_NR_GPIOS];
static int pic_raw_gpios[ARMADA_PIC_NR_GPIOS];

static void mvebu_armada_pm_enter(void __iomem *sdram_reg, u32 srcmd)
{
	u32 reg, ackcmd;
	int i;

	/* Put 001 as value on the GPIOs */
	reg = readl(gpio_ctrl);
	for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++)
		reg &= ~BIT(pic_raw_gpios[i]);
	reg |= BIT(pic_raw_gpios[0]);
	writel(reg, gpio_ctrl);

	/* Prepare writing 111 to the GPIOs */
	ackcmd = readl(gpio_ctrl);
	for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++)
		ackcmd |= BIT(pic_raw_gpios[i]);

	srcmd = cpu_to_le32(srcmd);
	ackcmd = cpu_to_le32(ackcmd);

	/*
	 * Wait a while, the PIC needs quite a bit of time between the
	 * two GPIO commands.
	 */
	mdelay(3000);

	asm volatile (
		/* Align to a cache line */
		".balign 32\n\t"

		/* Enter self refresh */
		"str %[srcmd], [%[sdram_reg]]\n\t"

		/*
		 * Wait 100 cycles for DDR to enter self refresh, by
		 * doing 50 times two instructions.
		 */
		"mov r1, #50\n\t"
		"1: subs r1, r1, #1\n\t"
		"bne 1b\n\t"

		/* Issue the command ACK */
		"str %[ackcmd], [%[gpio_ctrl]]\n\t"

		/* Trap the processor */
		"b .\n\t"
		: : [srcmd] "r" (srcmd), [sdram_reg] "r" (sdram_reg),
		    [ackcmd] "r" (ackcmd), [gpio_ctrl] "r" (gpio_ctrl)
		: "r1");
}

static int __init mvebu_armada_pm_init(void)
{
	struct device_node *np;
	struct device_node *gpio_ctrl_np = NULL;
	int ret = 0, i;

	if (!of_machine_is_compatible("marvell,axp-gp"))
		return -ENODEV;

	np = of_find_node_by_name(NULL, "pm_pic");
	if (!np)
		return -ENODEV;

	for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++) {
		char *name;
		struct of_phandle_args args;

		name = kasprintf(GFP_KERNEL, "pic-pin%d", i);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}

		pic_gpios[i] = fwnode_gpiod_get_index(of_fwnode_handle(np),
						      "ctrl", i, GPIOD_OUT_HIGH,
						      name);
		ret = PTR_ERR_OR_ZERO(pic_gpios[i]);
		if (ret) {
			kfree(name);
			goto out;
		}

		ret = of_parse_phandle_with_fixed_args(np, "ctrl-gpios", 2,
						       i, &args);
		if (ret < 0) {
			gpiod_put(pic_gpios[i]);
			kfree(name);
			goto out;
		}

		if (gpio_ctrl_np)
			of_node_put(gpio_ctrl_np);
		gpio_ctrl_np = args.np;
		pic_raw_gpios[i] = args.args[0];
	}

	gpio_ctrl = of_iomap(gpio_ctrl_np, 0);
	if (!gpio_ctrl) {
		ret = -ENOMEM;
		goto out;
	}

	mvebu_pm_suspend_init(mvebu_armada_pm_enter);

out:
	of_node_put(np);
	of_node_put(gpio_ctrl_np);
	return ret;
}

/*
 * Registering the mvebu_board_pm_enter callback must be done before
 * the platform_suspend_ops will be registered. At the same time we
 * also need to have the gpio devices registered. That's why we use a
 * device_initcall_sync, which is called after all the device_initcalls
 * (used by the gpio device) but before the late_initcalls (used to
 * register the platform_suspend_ops).
 */
device_initcall_sync(mvebu_armada_pm_init);
linux-master
arch/arm/mach-mvebu/pm-board.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Device Tree support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <[email protected]>
 * Gregory CLEMENT <[email protected]>
 * Thomas Petazzoni <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/io.h>
#include <linux/clocksource.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/mbus.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/smp_scu.h>
#include "armada-370-xp.h"
#include "common.h"
#include "coherency.h"
#include "mvebu-soc-id.h"

static void __iomem *scu_base;

/*
 * Enables the SCU when available. Obviously, this is only useful on
 * Cortex-A based SOCs, not on PJ4B based ones.
 */
static void __init mvebu_scu_enable(void)
{
	struct device_node *np =
		of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (np) {
		scu_base = of_iomap(np, 0);
		scu_enable(scu_base);
		of_node_put(np);
	}
}

void __iomem *mvebu_get_scu_base(void)
{
	return scu_base;
}

/*
 * When returning from suspend, the platform goes through the
 * bootloader, which executes its DDR3 training code. This code has
 * the unfortunate idea of using the first 10 KB of each DRAM bank to
 * exercise the RAM and calculate the optimal timings. Therefore, this
 * area of RAM is overwritten, and shouldn't be used by the kernel if
 * suspend/resume is supported.
 */
#ifdef CONFIG_SUSPEND
#define MVEBU_DDR_TRAINING_AREA_SZ (10 * SZ_1K)
static int __init mvebu_scan_mem(unsigned long node, const char *uname,
				 int depth, void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *reg, *endp;
	int l;

	if (type == NULL || strcmp(type, "memory"))
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(__be32));
	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		memblock_reserve(base, MVEBU_DDR_TRAINING_AREA_SZ);
	}

	return 0;
}

static void __init mvebu_memblock_reserve(void)
{
	of_scan_flat_dt(mvebu_scan_mem, NULL);
}
#else
static void __init mvebu_memblock_reserve(void) {}
#endif

static void __init mvebu_init_irq(void)
{
	irqchip_init();
	mvebu_scu_enable();
	coherency_init();
	BUG_ON(mvebu_mbus_dt_init(coherency_available()));
}

static void __init i2c_quirk(void)
{
	struct device_node *np;
	u32 dev, rev;

	/*
	 * Only revisions more recent than A0 support the offload
	 * mechanism. We can exit only if we are sure that we can
	 * get the SoC revision and it is more recent than A0.
	 */
	if (mvebu_get_soc_id(&dev, &rev) == 0 && rev > MV78XX0_A0_REV)
		return;

	for_each_compatible_node(np, NULL, "marvell,mv78230-i2c") {
		struct property *new_compat;

		new_compat = kzalloc(sizeof(*new_compat), GFP_KERNEL);

		new_compat->name = kstrdup("compatible", GFP_KERNEL);
		new_compat->length = sizeof("marvell,mv78230-a0-i2c");
		new_compat->value = kstrdup("marvell,mv78230-a0-i2c",
					    GFP_KERNEL);

		of_update_property(np, new_compat);
	}
}

static void __init mvebu_dt_init(void)
{
	if (of_machine_is_compatible("marvell,armadaxp"))
		i2c_quirk();
}

static void __init armada_370_xp_dt_fixup(void)
{
#ifdef CONFIG_SMP
	smp_set_ops(smp_ops(armada_xp_smp_ops));
#endif
}

static const char * const armada_370_xp_dt_compat[] __initconst = {
	"marvell,armada-370-xp",
	NULL,
};

DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
	.l2c_aux_val = 0,
	.l2c_aux_mask = ~0,
	.init_machine = mvebu_dt_init,
	.init_irq = mvebu_init_irq,
	.restart = mvebu_restart,
	.reserve = mvebu_memblock_reserve,
	.dt_compat = armada_370_xp_dt_compat,
	.dt_fixup = armada_370_xp_dt_fixup,
MACHINE_END

static const char * const armada_375_dt_compat[] __initconst = {
	"marvell,armada375",
	NULL,
};

DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
	.l2c_aux_val = 0,
	.l2c_aux_mask = ~0,
	.init_irq = mvebu_init_irq,
	.init_machine = mvebu_dt_init,
	.restart = mvebu_restart,
	.dt_compat = armada_375_dt_compat,
MACHINE_END

static const char * const armada_38x_dt_compat[] __initconst = {
	"marvell,armada380",
	"marvell,armada385",
	NULL,
};

DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
	.l2c_aux_val = 0,
	.l2c_aux_mask = ~0,
	.init_irq = mvebu_init_irq,
	.restart = mvebu_restart,
	.dt_compat = armada_38x_dt_compat,
MACHINE_END

static const char * const armada_39x_dt_compat[] __initconst = {
	"marvell,armada390",
	"marvell,armada398",
	NULL,
};

DT_MACHINE_START(ARMADA_39X_DT, "Marvell Armada 39x (Device Tree)")
	.l2c_aux_val = 0,
	.l2c_aux_mask = ~0,
	.init_irq = mvebu_init_irq,
	.restart = mvebu_restart,
	.dt_compat = armada_39x_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-mvebu/board-v7.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Symmetric Multi Processing (SMP) support for Armada XP
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <[email protected]>
 * Yehuda Yitschak <[email protected]>
 * Gregory CLEMENT <[email protected]>
 * Thomas Petazzoni <[email protected]>
 *
 * The Armada XP SoC has 4 ARMv7 PJ4B CPUs running in full HW coherency
 * This file implements the routines for preparing the SMP infrastructure
 * and waking up the secondary CPUs
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mbus.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include "common.h"
#include "armada-370-xp.h"
#include "pmsu.h"
#include "coherency.h"

#define ARMADA_XP_MAX_CPUS 4

#define AXP_BOOTROM_BASE 0xfff00000
#define AXP_BOOTROM_SIZE 0x100000

static struct clk *boot_cpu_clk;

static struct clk *get_cpu_clk(int cpu)
{
	struct clk *cpu_clk;
	struct device_node *np = of_get_cpu_node(cpu, NULL);

	if (WARN(!np, "missing cpu node\n"))
		return NULL;
	cpu_clk = of_clk_get(np, 0);
	if (WARN_ON(IS_ERR(cpu_clk)))
		return NULL;
	return cpu_clk;
}

static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret, hw_cpu;

	pr_info("Booting CPU %d\n", cpu);

	hw_cpu = cpu_logical_map(cpu);
	mvebu_pmsu_set_cpu_boot_addr(hw_cpu, armada_xp_secondary_startup);

	/*
	 * This is needed to wake up CPUs in the offline state after
	 * using CPU hotplug.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * This is needed to take secondary CPUs out of reset on the
	 * initial boot.
	 */
	ret = mvebu_cpu_reset_deassert(hw_cpu);
	if (ret) {
		pr_warn("unable to boot CPU: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * When a CPU is brought back online, either through CPU hotplug, or
 * because of the boot of a kexec'ed kernel, the PMSU configuration
 * for this CPU might be in the deep idle state, preventing this CPU
 * from receiving interrupts. Here, we therefore take out the current
 * CPU from this state, which was entered by armada_xp_cpu_die()
 * below.
 */
static void armada_xp_secondary_init(unsigned int cpu)
{
	mvebu_v7_pmsu_idle_exit();
}

static void __init armada_xp_smp_init_cpus(void)
{
	unsigned int ncores = num_possible_cpus();

	if (ncores == 0 || ncores > ARMADA_XP_MAX_CPUS)
		panic("Invalid number of CPUs in DT\n");
}

static int armada_xp_sync_secondary_clk(unsigned int cpu)
{
	struct clk *cpu_clk = get_cpu_clk(cpu);

	if (!cpu_clk || !boot_cpu_clk)
		return 0;

	clk_prepare_enable(cpu_clk);
	clk_set_rate(cpu_clk, clk_get_rate(boot_cpu_clk));

	return 0;
}

static void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *node;
	struct resource res;
	int err;

	flush_cache_all();
	set_cpu_coherent();

	boot_cpu_clk = get_cpu_clk(smp_processor_id());
	if (boot_cpu_clk) {
		clk_prepare_enable(boot_cpu_clk);
		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
					  "arm/mvebu/sync_clocks:online",
					  armada_xp_sync_secondary_clk, NULL);
	}

	/*
	 * In order to boot the secondary CPUs we need to ensure
	 * the bootROM is mapped at the correct address.
	 */
	node = of_find_compatible_node(NULL, NULL, "marvell,bootrom");
	if (!node)
		panic("Cannot find 'marvell,bootrom' compatible node");

	err = of_address_to_resource(node, 0, &res);
	of_node_put(node);
	if (err < 0)
		panic("Cannot get 'bootrom' node address");

	if (res.start != AXP_BOOTROM_BASE ||
	    resource_size(&res) != AXP_BOOTROM_SIZE)
		panic("The address for the BootROM is incorrect");
}

#ifdef CONFIG_HOTPLUG_CPU
static void armada_xp_cpu_die(unsigned int cpu)
{
	/*
	 * CPU hotplug is implemented by putting offline CPUs into the
	 * deep idle sleep state.
	 */
	armada_370_xp_pmsu_idle_enter(true);
}

/*
 * We need a dummy function, so that platform_can_cpu_hotplug() knows
 * we support CPU hotplug. However, the function does not need to do
 * anything, because CPUs going offline can enter the deep idle state
 * by themselves, without any help from a still alive CPU.
 */
static int armada_xp_cpu_kill(unsigned int cpu)
{
	return 1;
}
#endif

const struct smp_operations armada_xp_smp_ops __initconst = {
	.smp_init_cpus = armada_xp_smp_init_cpus,
	.smp_prepare_cpus = armada_xp_smp_prepare_cpus,
	.smp_boot_secondary = armada_xp_boot_secondary,
	.smp_secondary_init = armada_xp_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die = armada_xp_cpu_die,
	.cpu_kill = armada_xp_cpu_kill,
#endif
};

CPU_METHOD_OF_DECLARE(armada_xp_smp, "marvell,armada-xp-smp",
		      &armada_xp_smp_ops);

#define MV98DX3236_CPU_RESUME_CTRL_REG 0x08
#define MV98DX3236_CPU_RESUME_ADDR_REG 0x04

static const struct of_device_id of_mv98dx3236_resume_table[] = {
	{ .compatible = "marvell,98dx3336-resume-ctrl", },
	{ /* end of list */ },
};

static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
{
	struct device_node *np;
	void __iomem *base;

	WARN_ON(hw_cpu != 1);

	np = of_find_matching_node(NULL, of_mv98dx3236_resume_table);
	if (!np)
		return -ENODEV;

	base = of_io_request_and_map(np, 0, of_node_full_name(np));
	of_node_put(np);
	if (IS_ERR(base))
		return PTR_ERR(base);

	writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG);
	writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);

	iounmap(base);

	return 0;
}

static int mv98dx3236_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret, hw_cpu;

	hw_cpu = cpu_logical_map(cpu);
	mv98dx3236_resume_set_cpu_boot_addr(hw_cpu,
					    armada_xp_secondary_startup);

	/*
	 * This is needed to wake up CPUs in the offline state after
	 * using CPU hotplug.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * This is needed to take secondary CPUs out of reset on the
	 * initial boot.
	 */
	ret = mvebu_cpu_reset_deassert(hw_cpu);
	if (ret) {
		pr_warn("unable to boot CPU: %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct smp_operations mv98dx3236_smp_ops __initconst = {
	.smp_init_cpus = armada_xp_smp_init_cpus,
	.smp_prepare_cpus = armada_xp_smp_prepare_cpus,
	.smp_boot_secondary = mv98dx3236_boot_secondary,
	.smp_secondary_init = armada_xp_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die = armada_xp_cpu_die,
	.cpu_kill = armada_xp_cpu_kill,
#endif
};

CPU_METHOD_OF_DECLARE(mv98dx3236_smp, "marvell,98dx3236-smp",
		      &mv98dx3236_smp_ops);
linux-master
arch/arm/mach-mvebu/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-mvebu/dove.c
 *
 * Marvell Dove 88AP510 System On Chip FDT Board
 */

#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/soc/dove/pmu.h>
#include <asm/hardware/cache-tauros2.h>
#include <asm/mach/arch.h>
#include "common.h"

static void __init dove_init(void)
{
	pr_info("Dove 88AP510 SoC\n");

#ifdef CONFIG_CACHE_TAUROS2
	tauros2_init(0);
#endif
	BUG_ON(mvebu_mbus_dt_init(false));
	dove_init_pmu();
}

static const char * const dove_dt_compat[] __initconst = {
	"marvell,dove",
	NULL
};

DT_MACHINE_START(DOVE_DT, "Marvell Dove")
	.init_machine = dove_init,
	.restart = mvebu_restart,
	.dt_compat = dove_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-mvebu/dove.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-mv78x00/mpp.c
 *
 * MPP functions for Marvell MV78x00 SoCs
 */

#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <plat/mpp.h>
#include "mv78xx0.h"
#include "common.h"
#include "mpp.h"

static unsigned int __init mv78xx0_variant(void)
{
	u32 dev, rev;

	mv78xx0_pcie_id(&dev, &rev);

	if (dev == MV78100_DEV_ID && rev >= MV78100_REV_A0)
		return MPP_78100_A0_MASK;

	printk(KERN_ERR "MPP setup: unknown mv78x00 variant "
	       "(dev %#x rev %#x)\n", dev, rev);
	return 0;
}

void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
{
	orion_mpp_conf(mpp_list, mv78xx0_variant(),
		       MPP_MAX, DEV_BUS_VIRT_BASE);
}
linux-master
arch/arm/mach-mv78xx0/mpp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-mv78xx0/common.c
 *
 * Core functions for Marvell MV78xx0 SoCs
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
#include <linux/clk-provider.h>
#include <linux/ethtool.h>
#include <asm/hardware/cache-feroceon-l2.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <linux/platform_data/usb-ehci-orion.h>
#include <linux/platform_data/mtd-orion_nand.h>
#include <plat/time.h>
#include <plat/common.h>
#include <plat/addr-map.h>
#include "mv78xx0.h"
#include "bridge-regs.h"
#include "common.h"

static int get_tclk(void);

/*****************************************************************************
 * Common bits
 ****************************************************************************/
int mv78xx0_core_index(void)
{
	u32 extra;

	/*
	 * Read Extra Features register.
	 */
	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (extra));

	return !!(extra & 0x00004000);
}

static int get_hclk(void)
{
	int hclk;

	/*
	 * HCLK tick rate is configured by DEV_D[7:5] pins.
	 */
	switch ((readl(SAMPLE_AT_RESET_LOW) >> 5) & 7) {
	case 0:
		hclk = 166666667;
		break;
	case 1:
		hclk = 200000000;
		break;
	case 2:
		hclk = 266666667;
		break;
	case 3:
		hclk = 333333333;
		break;
	case 4:
		hclk = 400000000;
		break;
	default:
		panic("unknown HCLK PLL setting: %.8x\n",
		      readl(SAMPLE_AT_RESET_LOW));
	}

	return hclk;
}

static void get_pclk_l2clk(int hclk, int core_index, int *pclk, int *l2clk)
{
	u32 cfg;

	/*
	 * Core #0 PCLK/L2CLK is configured by bits [13:8], core #1
	 * PCLK/L2CLK by bits [19:14].
	 */
	if (core_index == 0) {
		cfg = (readl(SAMPLE_AT_RESET_LOW) >> 8) & 0x3f;
	} else {
		cfg = (readl(SAMPLE_AT_RESET_LOW) >> 14) & 0x3f;
	}

	/*
	 * Bits [11:8] ([17:14] for core #1) configure the PCLK:HCLK
	 * ratio (1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6).
	 */
	*pclk = ((u64)hclk * (2 + (cfg & 0xf))) >> 1;

	/*
	 * Bits [13:12] ([19:18] for core #1) configure the PCLK:L2CLK
	 * ratio (1, 2, 3).
	 */
	*l2clk = *pclk / (((cfg >> 4) & 3) + 1);
}

static int get_tclk(void)
{
	int tclk_freq;

	/*
	 * TCLK tick rate is configured by DEV_A[2:0] strap pins.
	 */
	switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) {
	case 1:
		tclk_freq = 166666667;
		break;
	case 3:
		tclk_freq = 200000000;
		break;
	default:
		panic("unknown TCLK PLL setting: %.8x\n",
		      readl(SAMPLE_AT_RESET_HIGH));
	}

	return tclk_freq;
}

/*****************************************************************************
 * I/O Address Mapping
 ****************************************************************************/
static struct map_desc mv78xx0_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long) MV78XX0_CORE_REGS_VIRT_BASE,
		.pfn		= 0,
		.length		= MV78XX0_CORE_REGS_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long) MV78XX0_REGS_VIRT_BASE,
		.pfn		= __phys_to_pfn(MV78XX0_REGS_PHYS_BASE),
		.length		= MV78XX0_REGS_SIZE,
		.type		= MT_DEVICE,
	},
};

void __init mv78xx0_map_io(void)
{
	unsigned long phys;

	/*
	 * Map the right set of per-core registers depending on
	 * which core we are running on.
	 */
	if (mv78xx0_core_index() == 0) {
		phys = MV78XX0_CORE0_REGS_PHYS_BASE;
	} else {
		phys = MV78XX0_CORE1_REGS_PHYS_BASE;
	}
	mv78xx0_io_desc[0].pfn = __phys_to_pfn(phys);

	iotable_init(mv78xx0_io_desc, ARRAY_SIZE(mv78xx0_io_desc));
}

/*****************************************************************************
 * CLK tree
 ****************************************************************************/
static struct clk *tclk;

static void __init clk_init(void)
{
	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, get_tclk());

	orion_clkdev_init(tclk);
}

/*****************************************************************************
 * EHCI
 ****************************************************************************/
void __init mv78xx0_ehci0_init(void)
{
	orion_ehci_init(USB0_PHYS_BASE, IRQ_MV78XX0_USB_0, EHCI_PHY_NA);
}

/*****************************************************************************
 * EHCI1
 ****************************************************************************/
void __init mv78xx0_ehci1_init(void)
{
	orion_ehci_1_init(USB1_PHYS_BASE, IRQ_MV78XX0_USB_1);
}

/*****************************************************************************
 * EHCI2
 ****************************************************************************/
void __init mv78xx0_ehci2_init(void)
{
	orion_ehci_2_init(USB2_PHYS_BASE, IRQ_MV78XX0_USB_2);
}

/*****************************************************************************
 * GE00
 ****************************************************************************/
void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
	orion_ge00_init(eth_data,
			GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
			IRQ_MV78XX0_GE_ERR,
			MV643XX_TX_CSUM_DEFAULT_LIMIT);
}

/*****************************************************************************
 * GE01
 ****************************************************************************/
void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
	orion_ge01_init(eth_data,
			GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
			MV643XX_TX_CSUM_DEFAULT_LIMIT);
}

/*****************************************************************************
 * GE10
 ****************************************************************************/
void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
{
	u32 dev, rev;

	/*
	 * On the Z0, ge10 and ge11 are internally connected back
	 * to back, and not brought out.
	 */
	mv78xx0_pcie_id(&dev, &rev);
	if (dev == MV78X00_Z0_DEV_ID) {
		eth_data->phy_addr = MV643XX_ETH_PHY_NONE;
		eth_data->speed = SPEED_1000;
		eth_data->duplex = DUPLEX_FULL;
	}

	orion_ge10_init(eth_data, GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM);
}

/*****************************************************************************
 * GE11
 ****************************************************************************/
void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
{
	u32 dev, rev;

	/*
	 * On the Z0, ge10 and ge11 are internally connected back
	 * to back, and not brought out.
	 */
	mv78xx0_pcie_id(&dev, &rev);
	if (dev == MV78X00_Z0_DEV_ID) {
		eth_data->phy_addr = MV643XX_ETH_PHY_NONE;
		eth_data->speed = SPEED_1000;
		eth_data->duplex = DUPLEX_FULL;
	}

	orion_ge11_init(eth_data, GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM);
}

/*****************************************************************************
 * I2C
 ****************************************************************************/
void __init mv78xx0_i2c_init(void)
{
	orion_i2c_init(I2C_0_PHYS_BASE, IRQ_MV78XX0_I2C_0, 8);
	orion_i2c_1_init(I2C_1_PHYS_BASE, IRQ_MV78XX0_I2C_1, 8);
}

/*****************************************************************************
 * SATA
 ****************************************************************************/
void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data)
{
	orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_MV78XX0_SATA);
}

/*****************************************************************************
 * UART0
 ****************************************************************************/
void __init mv78xx0_uart0_init(void)
{
	orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
			 IRQ_MV78XX0_UART_0, tclk);
}

/*****************************************************************************
 * UART1
 ****************************************************************************/
void __init mv78xx0_uart1_init(void)
{
	orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
			 IRQ_MV78XX0_UART_1, tclk);
}

/*****************************************************************************
 * UART2
 ****************************************************************************/
void __init mv78xx0_uart2_init(void)
{
	orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE,
			 IRQ_MV78XX0_UART_2, tclk);
}

/*****************************************************************************
 * UART3
 ****************************************************************************/
void __init mv78xx0_uart3_init(void)
{
	orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE,
			 IRQ_MV78XX0_UART_3, tclk);
}

/*****************************************************************************
 * Time handling
 ****************************************************************************/
void __init mv78xx0_init_early(void)
{
	orion_time_set_base(TIMER_VIRT_BASE);
	if (mv78xx0_core_index() == 0)
		mvebu_mbus_init("marvell,mv78xx0-mbus",
				BRIDGE_WINS_CPU0_BASE, BRIDGE_WINS_SZ,
				DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ);
	else
		mvebu_mbus_init("marvell,mv78xx0-mbus",
				BRIDGE_WINS_CPU1_BASE, BRIDGE_WINS_SZ,
				DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ);
}

void __ref mv78xx0_timer_init(void)
{
	orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
			IRQ_MV78XX0_TIMER_1, get_tclk());
}

/****************************************************************************
 * XOR engine
 ****************************************************************************/
void __init mv78xx0_xor_init(void)
{
	orion_xor0_init(XOR_PHYS_BASE, XOR_PHYS_BASE + 0x200,
			IRQ_MV78XX0_XOR_0, IRQ_MV78XX0_XOR_1);
}

/****************************************************************************
 * Cryptographic Engines and Security Accelerator (CESA)
 ****************************************************************************/
void __init mv78xx0_crypto_init(void)
{
	mvebu_mbus_add_window_by_id(MV78XX0_MBUS_SRAM_TARGET,
				    MV78XX0_MBUS_SRAM_ATTR,
				    MV78XX0_SRAM_PHYS_BASE,
				    MV78XX0_SRAM_SIZE);
	orion_crypto_init(CRYPTO_PHYS_BASE, MV78XX0_SRAM_PHYS_BASE,
			  SZ_8K, IRQ_MV78XX0_CRYPTO);
}

/*****************************************************************************
 * General
 ****************************************************************************/
static char * __init mv78xx0_id(void)
{
	u32 dev, rev;

	mv78xx0_pcie_id(&dev, &rev);

	if (dev == MV78X00_Z0_DEV_ID) {
		if (rev == MV78X00_REV_Z0)
			return "MV78X00-Z0";
		else
			return "MV78X00-Rev-Unsupported";
	} else if (dev == MV78100_DEV_ID) {
		if (rev == MV78100_REV_A0)
			return "MV78100-A0";
		else if (rev == MV78100_REV_A1)
			return "MV78100-A1";
		else
			return "MV78100-Rev-Unsupported";
	} else if (dev == MV78200_DEV_ID) {
		if (rev == MV78100_REV_A0)
			return "MV78200-A0";
		else
			return "MV78200-Rev-Unsupported";
	} else {
		return "Device-Unknown";
	}
}

static int __init is_l2_writethrough(void)
{
	return !!(readl(CPU_CONTROL) & L2_WRITETHROUGH);
}

void __init mv78xx0_init(void)
{
	int core_index;
	int hclk;
	int pclk;
	int l2clk;

	core_index = mv78xx0_core_index();
	hclk = get_hclk();
	get_pclk_l2clk(hclk, core_index, &pclk, &l2clk);

	printk(KERN_INFO "%s ", mv78xx0_id());
	printk("core #%d, ", core_index);
	printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000);
	printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000);
	printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000);
	printk("TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);

	if (IS_ENABLED(CONFIG_CACHE_FEROCEON_L2))
		feroceon_l2_init(is_l2_writethrough());

	/* Setup root of clk tree */
	clk_init();
}

void mv78xx0_restart(enum reboot_mode mode, const char *cmd)
{
	/*
	 * Enable soft reset to assert RSTOUTn.
	 */
	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);

	/*
	 * Assert soft reset.
	 */
	writel(SOFT_RESET, SYSTEM_SOFT_RESET);

	while (1)
		;
}
linux-master
arch/arm/mach-mv78xx0/common.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
 *
 * Buffalo WXL (Terastation Duo) Setup routines
 *
 * sebastien requiem <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include "mv78xx0.h"
#include "common.h"
#include "mpp.h"

#define TSWXL_AUTO_SWITCH	15
#define TSWXL_USB_POWER1	30
#define TSWXL_USB_POWER2	31

/* This arch has 2 Giga Ethernet */
static struct mv643xx_eth_platform_data db78x00_ge00_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(0),
};

static struct mv643xx_eth_platform_data db78x00_ge01_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
};

/* 2 SATA controllers supporting HotPlug */
static struct mv_sata_platform_data db78x00_sata_data = {
	.n_ports	= 2,
};

static struct i2c_board_info __initdata db78x00_i2c_rtc = {
	I2C_BOARD_INFO("rs5c372a", 0x32),
};

static unsigned int wxl_mpp_config[] __initdata = {
	MPP0_GE1_TXCLK,
	MPP1_GE1_TXCTL,
	MPP2_GE1_RXCTL,
	MPP3_GE1_RXCLK,
	MPP4_GE1_TXD0,
	MPP5_GE1_TXD1,
	MPP6_GE1_TXD2,
	MPP7_GE1_TXD3,
	MPP8_GE1_RXD0,
	MPP9_GE1_RXD1,
	MPP10_GE1_RXD2,
	MPP11_GE1_RXD3,
	MPP12_GPIO,
	MPP13_GPIO,
	MPP14_GPIO,
	MPP15_GPIO,
	MPP16_GPIO,
	MPP17_GPIO,
	MPP18_GPIO,
	MPP19_GPIO,
	MPP20_GPIO,
	MPP21_GPIO,
	MPP22_GPIO,
	MPP23_GPIO,
	MPP24_UA2_TXD,
	MPP25_UA2_RXD,
	MPP26_UA2_CTSn,
	MPP27_UA2_RTSn,
	MPP28_GPIO,
	MPP29_GPIO,
	MPP30_GPIO,
	MPP31_GPIO,
	MPP32_GPIO,
	MPP33_GPIO,
	MPP34_GPIO,
	MPP35_GPIO,
	MPP36_GPIO,
	MPP37_GPIO,
	MPP38_GPIO,
	MPP39_GPIO,
	MPP40_GPIO,
	MPP41_GPIO,
	MPP42_GPIO,
	MPP43_GPIO,
	MPP44_GPIO,
	MPP45_GPIO,
	MPP46_GPIO,
	MPP47_GPIO,
	MPP48_GPIO,
	MPP49_GPIO,
	0
};

static struct gpio_keys_button tswxl_buttons[] = {
	{
		.code	   = KEY_OPTION,
		.gpio	   = TSWXL_AUTO_SWITCH,
		.desc	   = "Power-auto Switch",
		.active_low     = 1,
	}
};

static struct gpio_keys_platform_data tswxl_button_data = {
	.buttons	= tswxl_buttons,
	.nbuttons       = ARRAY_SIZE(tswxl_buttons),
};

static struct platform_device tswxl_button_device = {
	.name	   = "gpio-keys",
	.id	     = -1,
	.num_resources  = 0,
	.dev	    = {
		.platform_data  = &tswxl_button_data,
	},
};

static void __init wxl_init(void)
{
	/*
	 * Basic MV78xx0 setup. Needs to be called early.
	 */
	mv78xx0_init();
	mv78xx0_mpp_conf(wxl_mpp_config);

	/*
	 * Partition on-chip peripherals between the two CPU cores.
	 */
	mv78xx0_ehci0_init();
	mv78xx0_ehci1_init();

	mv78xx0_ge00_init(&db78x00_ge00_data);
	mv78xx0_ge01_init(&db78x00_ge01_data);

	mv78xx0_sata_init(&db78x00_sata_data);
	mv78xx0_uart0_init();
	mv78xx0_uart1_init();
	mv78xx0_uart2_init();
	mv78xx0_uart3_init();
	mv78xx0_xor_init();
	mv78xx0_crypto_init();
	mv78xx0_i2c_init();
	i2c_register_board_info(0, &db78x00_i2c_rtc, 1);

	//enable both usb ports
	gpio_direction_output(TSWXL_USB_POWER1, 1);
	gpio_direction_output(TSWXL_USB_POWER2, 1);

	//enable rear switch
	platform_device_register(&tswxl_button_device);
}

static int __init wxl_pci_init(void)
{
	if (machine_is_terastation_wxl() && mv78xx0_core_index() == 0)
		mv78xx0_pcie_init(1, 1);

	return 0;
}
subsys_initcall(wxl_pci_init);

MACHINE_START(TERASTATION_WXL, "Buffalo Nas WXL")
	/* Maintainer: Sebastien Requiem <[email protected]> */
	.atag_offset	= 0x100,
	.nr_irqs	= MV78XX0_NR_IRQS,
	.init_machine	= wxl_init,
	.map_io		= mv78xx0_map_io,
	.init_early	= mv78xx0_init_early,
	.init_irq	= mv78xx0_init_irq,
	.init_time	= mv78xx0_timer_init,
	.restart	= mv78xx0_restart,
MACHINE_END
linux-master
arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-mv78xx0/irq.c
 *
 * MV78xx0 IRQ handling.
 */
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/exception.h>
#include <plat/orion-gpio.h>
#include <plat/irq.h>
#include "bridge-regs.h"
#include "common.h"

static int __initdata gpio0_irqs[4] = {
	IRQ_MV78XX0_GPIO_0_7,
	IRQ_MV78XX0_GPIO_8_15,
	IRQ_MV78XX0_GPIO_16_23,
	IRQ_MV78XX0_GPIO_24_31,
};

static void __iomem *mv78xx0_irq_base = IRQ_VIRT_BASE;

static asmlinkage void
__exception_irq_entry mv78xx0_legacy_handle_irq(struct pt_regs *regs)
{
	u32 stat;

	stat = readl_relaxed(mv78xx0_irq_base + IRQ_CAUSE_LOW_OFF);
	stat &= readl_relaxed(mv78xx0_irq_base + IRQ_MASK_LOW_OFF);
	if (stat) {
		unsigned int hwirq = __fls(stat);
		handle_IRQ(hwirq, regs);
		return;
	}
	stat = readl_relaxed(mv78xx0_irq_base + IRQ_CAUSE_HIGH_OFF);
	stat &= readl_relaxed(mv78xx0_irq_base + IRQ_MASK_HIGH_OFF);
	if (stat) {
		unsigned int hwirq = 32 + __fls(stat);
		handle_IRQ(hwirq, regs);
		return;
	}
	stat = readl_relaxed(mv78xx0_irq_base + IRQ_CAUSE_ERR_OFF);
	stat &= readl_relaxed(mv78xx0_irq_base + IRQ_MASK_ERR_OFF);
	if (stat) {
		unsigned int hwirq = 64 + __fls(stat);
		handle_IRQ(hwirq, regs);
		return;
	}
}

void __init mv78xx0_init_irq(void)
{
	orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
	orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
	orion_irq_init(64, IRQ_VIRT_BASE + IRQ_MASK_ERR_OFF);

	set_handle_irq(mv78xx0_legacy_handle_irq);

	/*
	 * Initialize gpiolib for GPIOs 0-31. (The GPIO interrupt mask
	 * registers for core #1 are at an offset of 0x18 from those of
	 * core #0.)
	 */
	orion_gpio_init(0, 32, GPIO_VIRT_BASE,
			mv78xx0_core_index() ? 0x18 : 0,
			IRQ_MV78XX0_GPIO_START, gpio0_irqs);
}
linux-master
arch/arm/mach-mv78xx0/irq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-mv78xx0/pcie.c
 *
 * PCIe functions for Marvell MV78xx0 SoCs
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/mbus.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
#include "mv78xx0.h"
#include "common.h"

#define MV78XX0_MBUS_PCIE_MEM_TARGET(port, lane) ((port) ? 8 : 4)
#define MV78XX0_MBUS_PCIE_MEM_ATTR(port, lane)   (0xf8 & ~(0x10 << (lane)))
#define MV78XX0_MBUS_PCIE_IO_TARGET(port, lane)  ((port) ? 8 : 4)
#define MV78XX0_MBUS_PCIE_IO_ATTR(port, lane)    (0xf0 & ~(0x10 << (lane)))

struct pcie_port {
	u8			maj;
	u8			min;
	u8			root_bus_nr;
	void __iomem		*base;
	spinlock_t		conf_lock;
	char			mem_space_name[20];
	struct resource		res;
};

static struct pcie_port pcie_port[8];
static int num_pcie_ports;
static struct resource pcie_io_space;

void __init mv78xx0_pcie_id(u32 *dev, u32 *rev)
{
	*dev = orion_pcie_dev_id(PCIE00_VIRT_BASE);
	*rev = orion_pcie_rev(PCIE00_VIRT_BASE);
}

u32 pcie_port_size[8] = {
	0,
	0x20000000,
	0x10000000,
	0x10000000,
	0x08000000,
	0x08000000,
	0x08000000,
	0x04000000,
};

static void __init mv78xx0_pcie_preinit(void)
{
	int i;
	u32 size_each;
	u32 start;

	pcie_io_space.name = "PCIe I/O Space";
	pcie_io_space.start = MV78XX0_PCIE_IO_PHYS_BASE(0);
	pcie_io_space.end =
		MV78XX0_PCIE_IO_PHYS_BASE(0) + MV78XX0_PCIE_IO_SIZE * 8 - 1;
	pcie_io_space.flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, &pcie_io_space))
		panic("can't allocate PCIe I/O space");

	if (num_pcie_ports > 7)
		panic("invalid number of PCIe ports");

	size_each = pcie_port_size[num_pcie_ports];

	start = MV78XX0_PCIE_MEM_PHYS_BASE;
	for (i = 0; i < num_pcie_ports; i++) {
		struct pcie_port *pp = pcie_port + i;

		snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
			 "PCIe %d.%d MEM", pp->maj, pp->min);
		pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
		pp->res.name = pp->mem_space_name;
		pp->res.flags = IORESOURCE_MEM;
		pp->res.start = start;
		pp->res.end = start + size_each - 1;
		start += size_each;

		if (request_resource(&iomem_resource, &pp->res))
			panic("can't allocate PCIe MEM sub-space");

		mvebu_mbus_add_window_by_id(MV78XX0_MBUS_PCIE_MEM_TARGET(pp->maj, pp->min),
					    MV78XX0_MBUS_PCIE_MEM_ATTR(pp->maj, pp->min),
					    pp->res.start, resource_size(&pp->res));
		mvebu_mbus_add_window_remap_by_id(MV78XX0_MBUS_PCIE_IO_TARGET(pp->maj, pp->min),
						  MV78XX0_MBUS_PCIE_IO_ATTR(pp->maj, pp->min),
						  i * SZ_64K, SZ_64K, 0);
	}
}

static int __init mv78xx0_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct pcie_port *pp;
	struct resource realio;

	if (nr >= num_pcie_ports)
		return 0;

	pp = &pcie_port[nr];
	sys->private_data = pp;
	pp->root_bus_nr = sys->busnr;

	/*
	 * Generic PCIe unit setup.
	 */
	orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
	orion_pcie_setup(pp->base);

	realio.start = nr * SZ_64K;
	realio.end = realio.start + SZ_64K - 1;
	pci_remap_iospace(&realio, MV78XX0_PCIE_IO_PHYS_BASE(nr));

	pci_add_resource_offset(&sys->resources, &pp->res, sys->mem_offset);

	return 1;
}

static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
{
	/*
	 * Don't go out when trying to access nonexisting devices
	 * on the local bus.
	 */
	if (bus == pp->root_bus_nr && dev > 1)
		return 0;

	return 1;
}

static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pci_sys_data *sys = bus->sysdata;
	struct pcie_port *pp = sys->private_data;
	unsigned long flags;
	int ret;

	if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	spin_lock_irqsave(&pp->conf_lock, flags);
	ret = orion_pcie_rd_conf(pp->base, bus, devfn, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pci_sys_data *sys = bus->sysdata;
	struct pcie_port *pp = sys->private_data;
	unsigned long flags;
	int ret;

	if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	spin_lock_irqsave(&pp->conf_lock, flags);
	ret = orion_pcie_wr_conf(pp->base, bus, devfn, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

static struct pci_ops pcie_ops = {
	.read = pcie_rd_conf,
	.write = pcie_wr_conf,
};

/*
 * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
 * is operating as a root complex this needs to be switched to
 * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
 * the device. Decoding setup is handled by the orion code.
 */
static void rc_pci_fixup(struct pci_dev *dev)
{
	if (dev->bus->parent == NULL && dev->devfn == 0) {
		struct resource *r;

		dev->class &= 0xff;
		dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
		pci_dev_for_each_resource(dev, r) {
			r->start	= 0;
			r->end		= 0;
			r->flags	= 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);

static int __init mv78xx0_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
{
	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);

	if (nr >= num_pcie_ports) {
		BUG();
		return -EINVAL;
	}

	list_splice_init(&sys->resources, &bridge->windows);
	bridge->dev.parent = NULL;
	bridge->sysdata = sys;
	bridge->busnr = sys->busnr;
	bridge->ops = &pcie_ops;

	return pci_scan_root_bus_bridge(bridge);
}

static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
	u8 pin)
{
	struct pci_sys_data *sys = dev->bus->sysdata;
	struct pcie_port *pp = sys->private_data;

	return IRQ_MV78XX0_PCIE_00 + (pp->maj << 2) + pp->min;
}

static struct hw_pci mv78xx0_pci __initdata = {
	.nr_controllers	= 8,
	.preinit	= mv78xx0_pcie_preinit,
	.setup		= mv78xx0_pcie_setup,
	.scan		= mv78xx0_pcie_scan_bus,
	.map_irq	= mv78xx0_pcie_map_irq,
};

static void __init add_pcie_port(int maj, int min, void __iomem *base)
{
	printk(KERN_INFO "MV78xx0 PCIe port %d.%d: ", maj, min);

	if (orion_pcie_link_up(base)) {
		struct pcie_port *pp = &pcie_port[num_pcie_ports++];

		printk("link up\n");

		pp->maj = maj;
		pp->min = min;
		pp->root_bus_nr = -1;
		pp->base = base;
		spin_lock_init(&pp->conf_lock);
		memset(&pp->res, 0, sizeof(pp->res));
	} else {
		printk("link down, ignoring\n");
	}
}

void __init mv78xx0_pcie_init(int init_port0, int init_port1)
{
	vga_base = MV78XX0_PCIE_MEM_PHYS_BASE;

	if (init_port0) {
		add_pcie_port(0, 0, PCIE00_VIRT_BASE);
		if (!orion_pcie_x4_mode(PCIE00_VIRT_BASE)) {
			add_pcie_port(0, 1, PCIE01_VIRT_BASE);
			add_pcie_port(0, 2, PCIE02_VIRT_BASE);
			add_pcie_port(0, 3, PCIE03_VIRT_BASE);
		}
	}

	if (init_port1) {
		add_pcie_port(1, 0, PCIE10_VIRT_BASE);
		if (!orion_pcie_x4_mode((void __iomem *)PCIE10_VIRT_BASE)) {
			add_pcie_port(1, 1, PCIE11_VIRT_BASE);
			add_pcie_port(1, 2, PCIE12_VIRT_BASE);
			add_pcie_port(1, 3, PCIE13_VIRT_BASE);
		}
	}

	pci_common_init(&mv78xx0_pci);
}
linux-master
arch/arm/mach-mv78xx0/pcie.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Conexant Digicolor SoCs
 */
#include <asm/mach/arch.h>

static const char *const digicolor_dt_compat[] __initconst = {
	"cnxt,cx92755",
	NULL,
};

DT_MACHINE_START(DIGICOLOR, "Conexant Digicolor (Flattened Device Tree)")
	.dt_compat = digicolor_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-digicolor/digicolor.c
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (C) ASPEED Technology Inc.
// Copyright IBM Corp.

#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/smp.h>

#define BOOT_ADDR	0x00
#define BOOT_SIG	0x04

static struct device_node *secboot_node;

static int aspeed_g6_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	void __iomem *base;

	base = of_iomap(secboot_node, 0);
	if (!base) {
		pr_err("could not map the secondary boot base!");
		return -ENODEV;
	}

	writel_relaxed(0, base + BOOT_ADDR);
	writel_relaxed(__pa_symbol(secondary_startup_arm),
		       base + BOOT_ADDR);
	writel_relaxed((0xABBAAB00 | (cpu & 0xff)), base + BOOT_SIG);

	dsb_sev();

	iounmap(base);

	return 0;
}

static void __init aspeed_g6_smp_prepare_cpus(unsigned int max_cpus)
{
	void __iomem *base;

	secboot_node = of_find_compatible_node(NULL, NULL,
					       "aspeed,ast2600-smpmem");
	if (!secboot_node) {
		pr_err("secboot device node not found!!\n");
		return;
	}

	base = of_iomap(secboot_node, 0);
	if (!base) {
		pr_err("could not map the secondary boot base!");
		return;
	}
	__raw_writel(0xBADABABA, base + BOOT_SIG);

	iounmap(base);
}

static const struct smp_operations aspeed_smp_ops __initconst = {
	.smp_prepare_cpus	= aspeed_g6_smp_prepare_cpus,
	.smp_boot_secondary	= aspeed_g6_boot_secondary,
};

CPU_METHOD_OF_DECLARE(aspeed_smp, "aspeed,ast2600-smp", &aspeed_smp_ops);
linux-master
arch/arm/mach-aspeed/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>

#include <asm/mcpm.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>

#include "platsmp.h"
#include "vexpress.h"

bool __init vexpress_smp_init_ops(void)
{
#ifdef CONFIG_MCPM
	int cpu;
	struct device_node *cpu_node, *cci_node;

	/*
	 * The best way to detect a multi-cluster configuration
	 * is to detect if the kernel can take over CCI ports
	 * control. Loop over possible CPUs and check if CCI
	 * port control is available.
	 * Override the default vexpress_smp_ops if so.
	 */
	for_each_possible_cpu(cpu) {
		bool available;

		cpu_node = of_get_cpu_node(cpu, NULL);
		if (WARN(!cpu_node, "Missing cpu device node!"))
			return false;

		cci_node = of_parse_phandle(cpu_node, "cci-control-port", 0);
		available = cci_node && of_device_is_available(cci_node);
		of_node_put(cci_node);
		of_node_put(cpu_node);

		if (!available)
			return false;
	}

	mcpm_smp_set_ops();

	return true;
#else
	return false;
#endif
}

static const struct of_device_id vexpress_smp_dt_scu_match[] __initconst = {
	{ .compatible = "arm,cortex-a5-scu", },
	{ .compatible = "arm,cortex-a9-scu", },
	{}
};

static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *scu =
		of_find_matching_node(NULL, vexpress_smp_dt_scu_match);

	if (scu)
		scu_enable(of_iomap(scu, 0));

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	vexpress_flags_set(__pa_symbol(versatile_secondary_startup));
}

#ifdef CONFIG_HOTPLUG_CPU
static void vexpress_cpu_die(unsigned int cpu)
{
	versatile_immitation_cpu_die(cpu, 0x40);
}
#endif

const struct smp_operations vexpress_smp_dt_ops __initconst = {
	.smp_prepare_cpus	= vexpress_smp_dt_prepare_cpus,
	.smp_secondary_init	= versatile_secondary_init,
	.smp_boot_secondary	= versatile_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= vexpress_cpu_die,
#endif
};
linux-master
arch/arm/mach-versatile/platsmp-vexpress.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Created by:	Nicolas Pitre, October 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:	(C) 2012  ARM Limited
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define RESET_CTRL		0x018
#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))

#define A15_CONF		0x400
#define A7_CONF			0x500
#define SYS_INFO		0x700
#define SPC_BASE		0xb00

static void __iomem *scc;

#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;
	ve_spc_set_resume_addr(cluster, cpu,
			       __pa_symbol(mcpm_entry_point));
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	return 0;
}

static int tc2_pm_cluster_powerup(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= TC2_CLUSTERS)
		return -EINVAL;
	ve_spc_powerdown(cluster, false);
	return 0;
}

static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	/*
	 * If the CPU is committed to power down, make sure
	 * the power controller will be in charge of waking it
	 * up upon IRQ, ie IRQ lines are cut from GIC CPU IF
	 * to the CPU by disabling the GIC CPU IF to prevent wfi
	 * from completing execution behind power controller back
	 */
	gic_cpu_if_down(0);
}

static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, true);
	ve_spc_global_wakeup_irq(true);
}

static void tc2_pm_cpu_cache_disable(void)
{
	v7_exit_coherency_flush(louis);
}

static void tc2_pm_cluster_cache_disable(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/*
		 * On the Cortex-A15 we need to disable
		 * L2 prefetching before flushing the cache.
		 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
	}

	v7_exit_coherency_flush(all);

	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
{
	u32 mask = cluster ?
		  RESET_A7_NCORERESET(cpu)
		: RESET_A15_NCORERESET(cpu);

	return !(readl_relaxed(scc + RESET_CTRL) & mask);
}

#define POLL_MSEC 10
#define TIMEOUT_MSEC 1000

static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned tries;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
		pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
			 __func__, cpu, cluster,
			 readl_relaxed(scc + RESET_CTRL));

		/*
		 * We need the CPU to reach WFI, but the power
		 * controller may put the cluster in reset and
		 * power it off as soon as that happens, before
		 * we have a chance to see STANDBYWFI.
		 *
		 * So we need to check for both conditions:
		 */
		if (tc2_core_in_reset(cpu, cluster) ||
		    ve_spc_cpu_in_wfi(cpu, cluster))
			return 0; /* success: the CPU is halted */

		/* Otherwise, wait and retry: */
		msleep(POLL_MSEC);
	}

	return -ETIMEDOUT; /* timeout */
}

static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
	ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
}

static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);
}

static void tc2_pm_cluster_is_up(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, false);
	ve_spc_global_wakeup_irq(false);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.cpu_powerup		= tc2_pm_cpu_powerup,
	.cluster_powerup	= tc2_pm_cluster_powerup,
	.cpu_suspend_prepare	= tc2_pm_cpu_suspend_prepare,
	.cpu_powerdown_prepare	= tc2_pm_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
	.cpu_cache_disable	= tc2_pm_cpu_cache_disable,
	.cluster_cache_disable	= tc2_pm_cluster_cache_disable,
	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
	.cpu_is_up		= tc2_pm_cpu_is_up,
	.cluster_is_up		= tc2_pm_cluster_is_up,
};

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}

static int __init tc2_pm_init(void)
{
	unsigned int mpidr, cpu, cluster;
	int ret, irq;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and number of CPUs really available in clusters.
	 */
	np = of_find_compatible_node(NULL, NULL,
				     "arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	irq = irq_of_parse_and_map(np, 0);

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
	if (ret)
		return ret;

	if (!cci_probed())
		return -ENODEV;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bound!\n", __func__);
		return -EINVAL;
	}

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		/* test if we can (re)enable the CCI on our own */
		BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
		pr_info("TC2 power management initialized\n");
	}

	return ret;
}

early_initcall(tc2_pm_init);
linux-master
arch/arm/mach-versatile/tc2_pm.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Linus Walleij */ #include <linux/smp.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> #include "platsmp.h" #define REALVIEW_SYS_FLAGSSET_OFFSET 0x30 static const struct of_device_id realview_scu_match[] = { { .compatible = "arm,arm11mp-scu", }, { .compatible = "arm,cortex-a9-scu", }, { .compatible = "arm,cortex-a5-scu", }, { } }; static const struct of_device_id realview_syscon_match[] = { { .compatible = "arm,core-module-integrator", }, { .compatible = "arm,realview-eb-syscon", }, { .compatible = "arm,realview-pb11mp-syscon", }, { .compatible = "arm,realview-pbx-syscon", }, { }, }; static void __init realview_smp_prepare_cpus(unsigned int max_cpus) { struct device_node *np; void __iomem *scu_base; struct regmap *map; unsigned int ncores; int i; np = of_find_matching_node(NULL, realview_scu_match); if (!np) { pr_err("PLATSMP: No SCU base address\n"); return; } scu_base = of_iomap(np, 0); of_node_put(np); if (!scu_base) { pr_err("PLATSMP: No SCU remap\n"); return; } scu_enable(scu_base); ncores = scu_get_core_count(scu_base); pr_info("SCU: %d cores detected\n", ncores); for (i = 0; i < ncores; i++) set_cpu_possible(i, true); iounmap(scu_base); /* The syscon contains the magic SMP start address registers */ np = of_find_matching_node(NULL, realview_syscon_match); if (!np) { pr_err("PLATSMP: No syscon match\n"); return; } map = syscon_node_to_regmap(np); if (IS_ERR(map)) { pr_err("PLATSMP: No syscon regmap\n"); return; } /* Put the boot address in this magic register */ regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET, __pa_symbol(versatile_secondary_startup)); } #ifdef CONFIG_HOTPLUG_CPU static void realview_cpu_die(unsigned int cpu) { return versatile_immitation_cpu_die(cpu, 0x20); } #endif static const struct smp_operations realview_dt_smp_ops __initconst = { .smp_prepare_cpus = realview_smp_prepare_cpus, .smp_secondary_init = versatile_secondary_init, .smp_boot_secondary = versatile_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = realview_cpu_die, #endif }; CPU_METHOD_OF_DECLARE(realview_smp, "arm,realview-smp", &realview_dt_smp_ops);
linux-master
arch/arm/mach-versatile/platsmp-realview.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <[email protected]>
 *          Achin Gupta <[email protected]>
 *          Lorenzo Pieralisi <[email protected]>
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/semaphore.h>

#include <asm/cacheflush.h>

#include "spc.h"

#define SPCLOG "vexpress-spc: "

#define PERF_LVL_A15		0x00
#define PERF_REQ_A15		0x04
#define PERF_LVL_A7		0x08
#define PERF_REQ_A7		0x0c
#define COMMS			0x10
#define COMMS_REQ		0x14
#define PWC_STATUS		0x18
#define PWC_FLAG		0x1c

/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK		0x24
#define WAKE_INT_RAW		0x28
#define WAKE_INT_STAT		0x2c
/* SPC power down registers */
#define A15_PWRDN_EN		0x30
#define A7_PWRDN_EN		0x34
/* SPC per-CPU mailboxes */
#define A15_BX_ADDR0		0x68
#define A7_BX_ADDR0		0x78

/* SPC CPU/cluster reset status */
#define STANDBYWFI_STAT		0x3c
#define STANDBYWFI_STAT_A15_CPU_MASK(cpu)	(1 << (cpu))
#define STANDBYWFI_STAT_A7_CPU_MASK(cpu)	(1 << (3 + (cpu)))

/* SPC system config interface registers */
#define SYSCFG_WDATA		0x70
#define SYSCFG_RDATA		0x74

/* A15/A7 OPP virtual register base */
#define A15_PERFVAL_BASE	0xC10
#define A7_PERFVAL_BASE		0xC30

/* Config interface control bits */
#define SYSCFG_START		BIT(31)
#define SYSCFG_SCC		(6 << 20)
#define SYSCFG_STAT		(14 << 20)

/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK	(0x3 << 10)

/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS		2

/*
 * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
 * operation, the operation could start just before the jiffies counter
 * is incremented, so set a timeout of 20 ms (2 jiffies at 100 Hz).
 */
#define TIMEOUT_US	20000

#define MAX_OPPS	8
#define CA15_DVFS	0
#define CA7_DVFS	1
#define SPC_SYS_CFG	2
#define STAT_COMPLETE(type)	((1 << 0) << (type << 2))
#define STAT_ERR(type)		((1 << 1) << (type << 2))
#define RESPONSE_MASK(type)	(STAT_COMPLETE(type) | STAT_ERR(type))

struct ve_spc_opp {
	unsigned long freq;
	unsigned long u_volt;
};

struct ve_spc_drvdata {
	void __iomem *baseaddr;
	/*
	 * A15 cluster identifier
	 * It corresponds to the A15 processors' MPIDR[15:8] bitfield
	 */
	u32 a15_clusid;
	uint32_t cur_rsp_mask;
	uint32_t cur_rsp_stat;
	struct semaphore sem;
	struct completion done;
	struct ve_spc_opp *opps[MAX_CLUSTERS];
	int num_opps[MAX_CLUSTERS];
};

static struct ve_spc_drvdata *info;

static inline bool cluster_is_a15(u32 cluster)
{
	return cluster == info->a15_clusid;
}

/**
 * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs
 *
 * @set: if true, global wake-up IRQs are set, if false they are cleared
 *
 * Function to set/clear global wakeup IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
*/ void ve_spc_global_wakeup_irq(bool set) { u32 reg; reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); if (set) reg |= GBL_WAKEUP_INT_MSK; else reg &= ~GBL_WAKEUP_INT_MSK; writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); } /** * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs * * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @cpu: mpidr[7:0] bitfield describing cpu affinity level * @set: if true, wake-up IRQs are set, if false they are cleared * * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since * it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity. */ void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) { u32 mask, reg; if (cluster >= MAX_CLUSTERS) return; mask = BIT(cpu); if (!cluster_is_a15(cluster)) mask <<= 4; reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); if (set) reg |= mask; else reg &= ~mask; writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); } /** * ve_spc_set_resume_addr() - set the jump address used for warm boot * * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @cpu: mpidr[7:0] bitfield describing cpu affinity level * @addr: physical resume address */ void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr) { void __iomem *baseaddr; if (cluster >= MAX_CLUSTERS) return; if (cluster_is_a15(cluster)) baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2); else baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2); writel_relaxed(addr, baseaddr); } /** * ve_spc_powerdown() - enables/disables cluster powerdown * * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @enable: if true enables powerdown, if false disables it * * Function to enable/disable cluster powerdown. Not protected by locking * since it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity. */ void ve_spc_powerdown(u32 cluster, bool enable) { u32 pwdrn_reg; if (cluster >= MAX_CLUSTERS) return; pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN; writel_relaxed(enable, info->baseaddr + pwdrn_reg); } static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster) { return cluster_is_a15(cluster) ? STANDBYWFI_STAT_A15_CPU_MASK(cpu) : STANDBYWFI_STAT_A7_CPU_MASK(cpu); } /** * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not * * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster * @cluster: mpidr[15:8] bitfield describing cluster affinity level * * @return: non-zero if and only if the specified CPU is in WFI * * Take care when interpreting the result of this function: a CPU might * be in WFI temporarily due to idle, and is not necessarily safely * parked. */ int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster) { int ret; u32 mask = standbywfi_cpu_mask(cpu, cluster); if (cluster >= MAX_CLUSTERS) return 1; ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT); pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n", __func__, STANDBYWFI_STAT, ret, mask); return ret & mask; } static int ve_spc_get_performance(int cluster, u32 *freq) { struct ve_spc_opp *opps = info->opps[cluster]; u32 perf_cfg_reg = 0; u32 perf; perf_cfg_reg = cluster_is_a15(cluster) ? 
PERF_LVL_A15 : PERF_LVL_A7; perf = readl_relaxed(info->baseaddr + perf_cfg_reg); if (perf >= info->num_opps[cluster]) return -EINVAL; opps += perf; *freq = opps->freq; return 0; } /* find closest match to given frequency in OPP table */ static int ve_spc_round_performance(int cluster, u32 freq) { int idx, max_opp = info->num_opps[cluster]; struct ve_spc_opp *opps = info->opps[cluster]; u32 fmin = 0, fmax = ~0, ftmp; freq /= 1000; /* OPP entries in kHz */ for (idx = 0; idx < max_opp; idx++, opps++) { ftmp = opps->freq; if (ftmp >= freq) { if (ftmp <= fmax) fmax = ftmp; } else { if (ftmp >= fmin) fmin = ftmp; } } if (fmax != ~0) return fmax * 1000; else return fmin * 1000; } static int ve_spc_find_performance_index(int cluster, u32 freq) { int idx, max_opp = info->num_opps[cluster]; struct ve_spc_opp *opps = info->opps[cluster]; for (idx = 0; idx < max_opp; idx++, opps++) if (opps->freq == freq) break; return (idx == max_opp) ? -EINVAL : idx; } static int ve_spc_waitforcompletion(int req_type) { int ret = wait_for_completion_interruptible_timeout( &info->done, usecs_to_jiffies(TIMEOUT_US)); if (ret == 0) ret = -ETIMEDOUT; else if (ret > 0) ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO; return ret; } static int ve_spc_set_performance(int cluster, u32 freq) { u32 perf_cfg_reg; int ret, perf, req_type; if (cluster_is_a15(cluster)) { req_type = CA15_DVFS; perf_cfg_reg = PERF_LVL_A15; } else { req_type = CA7_DVFS; perf_cfg_reg = PERF_LVL_A7; } perf = ve_spc_find_performance_index(cluster, freq); if (perf < 0) return perf; if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) return -ETIME; init_completion(&info->done); info->cur_rsp_mask = RESPONSE_MASK(req_type); writel(perf, info->baseaddr + perf_cfg_reg); ret = ve_spc_waitforcompletion(req_type); info->cur_rsp_mask = 0; up(&info->sem); return ret; } static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data) { int ret; if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) return -ETIME; init_completion(&info->done); info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG); /* Set the control value */ writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS); ret = ve_spc_waitforcompletion(SPC_SYS_CFG); if (ret == 0) *data = readl(info->baseaddr + SYSCFG_RDATA); info->cur_rsp_mask = 0; up(&info->sem); return ret; } static irqreturn_t ve_spc_irq_handler(int irq, void *data) { struct ve_spc_drvdata *drv_data = data; uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS); if (info->cur_rsp_mask & status) { info->cur_rsp_stat = status; complete(&drv_data->done); } return IRQ_HANDLED; } /* * +--------------------------+ * | 31 20 | 19 0 | * +--------------------------+ * | m_volt | freq(kHz) | * +--------------------------+ */ #define MULT_FACTOR 20 #define VOLT_SHIFT 20 #define FREQ_MASK (0xFFFFF) static int ve_spc_populate_opps(uint32_t cluster) { uint32_t data = 0, off, ret, idx; struct ve_spc_opp *opps; opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL); if (!opps) return -ENOMEM; info->opps[cluster] = opps; off = cluster_is_a15(cluster) ? 
A15_PERFVAL_BASE : A7_PERFVAL_BASE; for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) { ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data); if (!ret) { opps->freq = (data & FREQ_MASK) * MULT_FACTOR; opps->u_volt = (data >> VOLT_SHIFT) * 1000; } else { break; } } info->num_opps[cluster] = idx; return ret; } static int ve_init_opp_table(struct device *cpu_dev) { int cluster; int idx, ret = 0, max_opp; struct ve_spc_opp *opps; cluster = topology_physical_package_id(cpu_dev->id); cluster = cluster < 0 ? 0 : cluster; max_opp = info->num_opps[cluster]; opps = info->opps[cluster]; for (idx = 0; idx < max_opp; idx++, opps++) { ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); if (ret) { dev_warn(cpu_dev, "failed to add opp %lu %lu\n", opps->freq, opps->u_volt); return ret; } } return ret; } int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq) { int ret; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->baseaddr = baseaddr; info->a15_clusid = a15_clusid; if (irq <= 0) { pr_err(SPCLOG "Invalid IRQ %d\n", irq); kfree(info); return -EINVAL; } init_completion(&info->done); readl_relaxed(info->baseaddr + PWC_STATUS); ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "vexpress-spc", info); if (ret) { pr_err(SPCLOG "IRQ %d request failed\n", irq); kfree(info); return -ENODEV; } sema_init(&info->sem, 1); /* * Multi-cluster systems may need this data when non-coherent, during * cluster power-up/power-down. Make sure driver info reaches main * memory. */ sync_cache_w(info); sync_cache_w(&info); return 0; } struct clk_spc { struct clk_hw hw; int cluster; }; #define to_clk_spc(spc) container_of(spc, struct clk_spc, hw) static unsigned long spc_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_spc *spc = to_clk_spc(hw); u32 freq; if (ve_spc_get_performance(spc->cluster, &freq)) return -EIO; return freq * 1000; } static long spc_round_rate(struct clk_hw *hw, unsigned long drate, unsigned long *parent_rate) { struct clk_spc *spc = to_clk_spc(hw); return ve_spc_round_performance(spc->cluster, drate); } static int spc_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_spc *spc = to_clk_spc(hw); return ve_spc_set_performance(spc->cluster, rate / 1000); } static struct clk_ops clk_spc_ops = { .recalc_rate = spc_recalc_rate, .round_rate = spc_round_rate, .set_rate = spc_set_rate, }; static struct clk *ve_spc_clk_register(struct device *cpu_dev) { struct clk_init_data init; struct clk_spc *spc; spc = kzalloc(sizeof(*spc), GFP_KERNEL); if (!spc) return ERR_PTR(-ENOMEM); spc->hw.init = &init; spc->cluster = topology_physical_package_id(cpu_dev->id); spc->cluster = spc->cluster < 0 ? 
0 : spc->cluster; init.name = dev_name(cpu_dev); init.ops = &clk_spc_ops; init.flags = CLK_GET_RATE_NOCACHE; init.num_parents = 0; return devm_clk_register(cpu_dev, &spc->hw); } static int __init ve_spc_clk_init(void) { int cpu, cluster; struct clk *clk; bool init_opp_table[MAX_CLUSTERS] = { false }; if (!info) return 0; /* Continue only if SPC is initialised */ if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) { pr_err("failed to build OPP table\n"); return -ENODEV; } for_each_possible_cpu(cpu) { struct device *cpu_dev = get_cpu_device(cpu); if (!cpu_dev) { pr_warn("failed to get cpu%d device\n", cpu); continue; } clk = ve_spc_clk_register(cpu_dev); if (IS_ERR(clk)) { pr_warn("failed to register cpu%d clock\n", cpu); continue; } if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) { pr_warn("failed to register cpu%d clock lookup\n", cpu); continue; } cluster = topology_physical_package_id(cpu_dev->id); if (cluster < 0 || init_opp_table[cluster]) continue; if (ve_init_opp_table(cpu_dev)) pr_warn("failed to initialise cpu%d opp table\n", cpu); else if (dev_pm_opp_set_sharing_cpus(cpu_dev, topology_core_cpumask(cpu_dev->id))) pr_warn("failed to mark OPPs shared for cpu%d\n", cpu); else init_opp_table[cluster] = true; } platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); return 0; } device_initcall(ve_spc_clk_init);
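/*
 * Illustrative sketch (standalone, not from the kernel tree): decoding one
 * SCC "PERFVAL" word with the bit layout documented above
 * ve_spc_populate_opps() (bits [31:20] = millivolts, bits [19:0] =
 * frequency / 20 in kHz). The register value below is made up.
 */
#include <stdio.h>
#include <stdint.h>

#define FREQ_MASK	0xFFFFF
#define VOLT_SHIFT	20
#define MULT_FACTOR	20

int main(void)
{
	uint32_t data = 0x38407530;	/* hypothetical PERFVAL word */
	unsigned long freq_khz = (data & FREQ_MASK) * MULT_FACTOR;
	unsigned long u_volt = (data >> VOLT_SHIFT) * 1000;

	/* prints "600000 kHz at 900000 uV", i.e. 600 MHz at 0.9 V */
	printf("%lu kHz at %lu uV\n", freq_khz, u_volt);
	return 0;
}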
linux-master
arch/arm/mach-versatile/spc.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2000-2003 Deep Blue Solutions Ltd */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/memblock.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/amba/bus.h> #include <linux/amba/serial.h> #include <linux/io.h> #include <linux/stat.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pgtable.h> #include <asm/mach-types.h> #include <asm/mach/time.h> #include "integrator-hardware.h" #include "integrator-cm.h" #include "integrator.h" static DEFINE_RAW_SPINLOCK(cm_lock); static void __iomem *cm_base; /** * cm_get - get the value from the CM_CTRL register */ u32 cm_get(void) { return readl(cm_base + INTEGRATOR_HDR_CTRL_OFFSET); } /** * cm_control - update the CM_CTRL register. * @mask: bits to change * @set: bits to set */ void cm_control(u32 mask, u32 set) { unsigned long flags; u32 val; raw_spin_lock_irqsave(&cm_lock, flags); val = readl(cm_base + INTEGRATOR_HDR_CTRL_OFFSET) & ~mask; writel(val | set, cm_base + INTEGRATOR_HDR_CTRL_OFFSET); raw_spin_unlock_irqrestore(&cm_lock, flags); } void cm_clear_irqs(void) { /* disable core module IRQs */ writel(0xffffffffU, cm_base + INTEGRATOR_HDR_IC_OFFSET + IRQ_ENABLE_CLEAR); } static const struct of_device_id cm_match[] = { { .compatible = "arm,core-module-integrator"}, { }, }; void cm_init(void) { struct device_node *cm = of_find_matching_node(NULL, cm_match); if (!cm) { pr_crit("no core module node found in device tree\n"); return; } cm_base = of_iomap(cm, 0); if (!cm_base) { pr_crit("could not remap core module\n"); return; } cm_clear_irqs(); } /* * We need to stop things allocating the low memory; ideally we need a * better implementation of GFP_DMA which does not assume that DMA-able * memory starts at zero. */ void __init integrator_reserve(void) { memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); }
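/*
 * Illustrative sketch (standalone, not from the kernel tree): the
 * read-modify-write idiom used by cm_control() above, shown on a plain
 * variable. Only the bits named in 'mask' may change; in the real driver
 * a spinlock makes the clear-then-set update atomic between callers.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_cm_ctrl;	/* stands in for the CM_CTRL register */

static void update_bits(uint32_t *reg, uint32_t mask, uint32_t set)
{
	/* the real code holds cm_lock around this read-modify-write */
	uint32_t val = *reg & ~mask;

	*reg = val | set;
}

int main(void)
{
	fake_cm_ctrl = 0xf0;
	update_bits(&fake_cm_ctrl, 0x0f, 0x05);	/* touch only bits 3:0 */
	printf("0x%02x\n", (unsigned)fake_cm_ctrl);	/* prints "0xf5" */
	return 0;
}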
linux-master
arch/arm/mach-versatile/integrator.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2003 Deep Blue Solutions Ltd */ #include <linux/kernel.h> #include <linux/amba/mmci.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/sched_clock.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "integrator-hardware.h" #include "integrator-cm.h" #include "integrator.h" /* Base address to the core module header */ static struct regmap *cm_map; /* Base address to the CP controller */ static void __iomem *intcp_con_base; #define CM_COUNTER_OFFSET 0x28 /* * Logical Physical * f1400000 14000000 Interrupt controller * f1600000 16000000 UART 0 * fca00000 ca000000 SIC */ static struct map_desc intcp_io_desc[] __initdata __maybe_unused = { { .virtual = IO_ADDRESS(INTEGRATOR_IC_BASE), .pfn = __phys_to_pfn(INTEGRATOR_IC_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = IO_ADDRESS(INTEGRATOR_UART0_BASE), .pfn = __phys_to_pfn(INTEGRATOR_UART0_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = IO_ADDRESS(INTEGRATOR_CP_SIC_BASE), .pfn = __phys_to_pfn(INTEGRATOR_CP_SIC_BASE), .length = SZ_4K, .type = MT_DEVICE } }; static void __init intcp_map_io(void) { iotable_init(intcp_io_desc, ARRAY_SIZE(intcp_io_desc)); } /* * It seems that the card insertion interrupt remains active after * we've acknowledged it. We therefore ignore the interrupt, and * rely on reading it from the SIC. This also means that we must * clear the latched interrupt. */ static unsigned int mmc_status(struct device *dev) { unsigned int status = readl(__io_address(0xca000000 + 4)); writel(8, intcp_con_base + 8); return status & 8; } static struct mmci_platform_data mmc_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = mmc_status, }; static u64 notrace intcp_read_sched_clock(void) { unsigned int val; /* MMIO so discard return code */ regmap_read(cm_map, CM_COUNTER_OFFSET, &val); return val; } static void __init intcp_init_early(void) { cm_map = syscon_regmap_lookup_by_compatible("arm,core-module-integrator"); if (IS_ERR(cm_map)) return; sched_clock_register(intcp_read_sched_clock, 32, 24000000); } static void __init intcp_init_irq_of(void) { cm_init(); irqchip_init(); } /* * For the Device Tree, add in the UART, MMC and CLCD specifics as AUXDATA * and enforce the bus names since these are used for clock lookups. */ static struct of_dev_auxdata intcp_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_CP_MMC_BASE, "mmci", &mmc_data), { /* sentinel */ }, }; static const struct of_device_id intcp_syscon_match[] = { { .compatible = "arm,integrator-cp-syscon"}, { }, }; static void __init intcp_init_of(void) { struct device_node *cpcon; cpcon = of_find_matching_node(NULL, intcp_syscon_match); if (!cpcon) return; intcp_con_base = of_iomap(cpcon, 0); if (!intcp_con_base) return; of_platform_default_populate(NULL, intcp_auxdata_lookup, NULL); } static const char * intcp_dt_board_compat[] = { "arm,integrator-cp", NULL, }; DT_MACHINE_START(INTEGRATOR_CP_DT, "ARM Integrator/CP (Device Tree)") .reserve = integrator_reserve, .map_io = intcp_map_io, .init_early = intcp_init_early, .init_irq = intcp_init_irq_of, .init_machine = intcp_init_of, .dt_compat = intcp_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-versatile/integrator_cp.c
// SPDX-License-Identifier: GPL-2.0-only /* * dcscb.c - Dual Cluster System Configuration Block * * Created by: Nicolas Pitre, May 2012 * Copyright: (C) 2012-2013 Linaro Limited */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/of_address.h> #include <linux/vexpress.h> #include <linux/arm-cci.h> #include <asm/mcpm.h> #include <asm/proc-fns.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <asm/cp15.h> #include "vexpress.h" #define RST_HOLD0 0x0 #define RST_HOLD1 0x4 #define SYS_SWRESET 0x8 #define RST_STAT0 0xc #define RST_STAT1 0x10 #define EAG_CFG_R 0x20 #define EAG_CFG_W 0x24 #define KFC_CFG_R 0x28 #define KFC_CFG_W 0x2c #define DCS_CFG_R 0x30 static void __iomem *dcscb_base; static int dcscb_allcpus_mask[2]; static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster) { unsigned int rst_hold, cpumask = (1 << cpu); pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster])) return -EINVAL; rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4); rst_hold &= ~(cpumask | (cpumask << 4)); writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4); return 0; } static int dcscb_cluster_powerup(unsigned int cluster) { unsigned int rst_hold; pr_debug("%s: cluster %u\n", __func__, cluster); if (cluster >= 2) return -EINVAL; /* remove cluster reset and add individual CPU's reset */ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4); rst_hold &= ~(1 << 8); rst_hold |= dcscb_allcpus_mask[cluster]; writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4); return 0; } static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) { unsigned int rst_hold; pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster])); rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4); rst_hold |= (1 << cpu); writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4); } static void dcscb_cluster_powerdown_prepare(unsigned int cluster) { unsigned int rst_hold; pr_debug("%s: cluster %u\n", __func__, cluster); BUG_ON(cluster >= 2); rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4); rst_hold |= (1 << 8); writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4); } static void dcscb_cpu_cache_disable(void) { /* Disable and flush the local CPU cache. */ v7_exit_coherency_flush(louis); } static void dcscb_cluster_cache_disable(void) { /* Flush all cache levels for this cluster. */ v7_exit_coherency_flush(all); /* * A full outer cache flush could be needed at this point * on platforms with such a cache, depending on where the * outer cache sits. In some cases the notion of a "last * cluster standing" would need to be implemented if the * outer cache is shared across clusters. In any case, when * the outer cache needs flushing, there is no concurrent * access to the cache controller to worry about and no * special locking besides what is already provided by the * MCPM state machinery is needed. 
*/ /* * Disable cluster-level coherency by masking * incoming snoops and DVM messages: */ cci_disable_port_by_cpu(read_cpuid_mpidr()); } static const struct mcpm_platform_ops dcscb_power_ops = { .cpu_powerup = dcscb_cpu_powerup, .cluster_powerup = dcscb_cluster_powerup, .cpu_powerdown_prepare = dcscb_cpu_powerdown_prepare, .cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare, .cpu_cache_disable = dcscb_cpu_cache_disable, .cluster_cache_disable = dcscb_cluster_cache_disable, }; extern void dcscb_power_up_setup(unsigned int affinity_level); static int __init dcscb_init(void) { struct device_node *node; unsigned int cfg; int ret; if (!cci_probed()) return -ENODEV; node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb"); if (!node) return -ENODEV; dcscb_base = of_iomap(node, 0); of_node_put(node); if (!dcscb_base) return -EADDRNOTAVAIL; cfg = readl_relaxed(dcscb_base + DCS_CFG_R); dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1; dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1; ret = mcpm_platform_register(&dcscb_power_ops); if (!ret) ret = mcpm_sync_init(dcscb_power_up_setup); if (ret) { iounmap(dcscb_base); return ret; } pr_info("VExpress DCSCB support installed\n"); /* * Future entries into the kernel can now go * through the cluster entry vectors. */ vexpress_flags_set(__pa_symbol(mcpm_entry_point)); return 0; } early_initcall(dcscb_init);
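/*
 * Illustrative sketch (standalone, not from the kernel tree): how
 * dcscb_init() above derives per-cluster CPU masks from DCS_CFG_R. Bits
 * [19:16] hold the CPU count for cluster 0 and bits [23:20] the count for
 * cluster 1; (1 << n) - 1 turns a count of n CPUs into a mask. The
 * register value below is made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cfg = 0x00340000;	/* hypothetical DCS_CFG_R: 4 + 3 CPUs */
	unsigned int mask0 = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	unsigned int mask1 = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;

	/* prints "cluster0 0x0f cluster1 0x07" */
	printf("cluster0 0x%02x cluster1 0x%02x\n", mask0, mask1);
	return 0;
}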
linux-master
arch/arm/mach-versatile/dcscb.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/of.h> #include <linux/of_address.h> #include <asm/mach/arch.h> #include "vexpress.h" #define SYS_FLAGSSET 0x030 #define SYS_FLAGSCLR 0x034 void vexpress_flags_set(u32 data) { static void __iomem *base; if (!base) { struct device_node *node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg"); base = of_iomap(node, 0); } if (WARN_ON(!base)) return; writel(~0, base + SYS_FLAGSCLR); writel(data, base + SYS_FLAGSSET); } static const char * const v2m_dt_match[] __initconst = { "arm,vexpress", NULL, }; DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express") .dt_compat = v2m_dt_match, .l2c_aux_val = 0x00400000, .l2c_aux_mask = 0xfe0fffff, .smp = smp_ops(vexpress_smp_dt_ops), .smp_init = smp_init_ops(vexpress_smp_init_ops), MACHINE_END
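/*
 * Illustrative sketch (standalone, not from the kernel tree): the
 * FLAGSCLR/FLAGSSET pair used by vexpress_flags_set() above is the common
 * write-one-to-clear / write-one-to-set register idiom. Clearing
 * everything first means the final value is exactly 'data', regardless of
 * what firmware left behind.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t flags;	/* stands in for the SYS_FLAGS register */

static void flags_clr(uint32_t bits) { flags &= ~bits; }
static void flags_set(uint32_t bits) { flags |= bits; }

int main(void)
{
	flags = 0xdeadbeef;		/* arbitrary leftover contents */
	flags_clr(~0u);			/* mirrors writel(~0, SYS_FLAGSCLR) */
	flags_set(0x80008000);		/* mirrors writel(data, SYS_FLAGSSET) */
	printf("0x%08x\n", (unsigned)flags);	/* prints "0x80008000" */
	return 0;
}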
linux-master
arch/arm/mach-versatile/v2m.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 ARM Limited * * Author: Vladimir Murzin <[email protected]> */ #include <asm/mach/arch.h> static const char *const mps2_compat[] __initconst = { "arm,mps2", NULL }; DT_MACHINE_START(MPS2DT, "MPS2 (Device Tree Support)") .dt_compat = mps2_compat, MACHINE_END
linux-master
arch/arm/mach-versatile/v2m-mps2.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Linaro Ltd. * * Author: Linus Walleij <[email protected]> */ #include <asm/mach/arch.h> static const char *const realview_dt_platform_compat[] __initconst = { "arm,realview-eb", "arm,realview-pb1176", "arm,realview-pb11mp", "arm,realview-pba8", "arm,realview-pbx", NULL, }; DT_MACHINE_START(REALVIEW_DT, "ARM RealView Machine (Device Tree Support)") #ifdef CONFIG_ZONE_DMA .dma_zone_size = SZ_256M, #endif .dt_compat = realview_dt_platform_compat, .l2c_aux_val = 0x0, .l2c_aux_mask = ~0x0, MACHINE_END
linux-master
arch/arm/mach-versatile/realview.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This code is specific to the hardware found on ARM Realview and * Versatile Express platforms where the CPUs are unable to be individually * woken, and where there is no way to hot-unplug CPUs. Real platforms * should not copy this code. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/jiffies.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include "platsmp.h" /* * versatile_cpu_release controls the release of CPUs from the holding * pen in headsmp.S, which exists because we are not always able to * control the release of individual CPUs from the board firmware. * Production platforms do not need this. */ volatile int versatile_cpu_release = -1; /* * Write versatile_cpu_release in a way that is guaranteed to be visible to * all observers, irrespective of whether they're taking part in coherency * or not. This is necessary for the hotplug code to work reliably. */ static void versatile_write_cpu_release(int val) { versatile_cpu_release = val; smp_wmb(); sync_cache_w(&versatile_cpu_release); } /* * versatile_lock exists to avoid running the loops_per_jiffy delay loop * calibrations on the secondary CPU while the requesting CPU is using * the limited-bandwidth bus - which affects the calibration value. * Production platforms do not need this. */ static DEFINE_RAW_SPINLOCK(versatile_lock); void versatile_secondary_init(unsigned int cpu) { /* * let the primary processor know we're out of the * pen, then head off into the C entry point */ versatile_write_cpu_release(-1); /* * Synchronise with the boot thread. */ raw_spin_lock(&versatile_lock); raw_spin_unlock(&versatile_lock); } int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned long timeout; /* * Set synchronisation state between this boot processor * and the secondary one */ raw_spin_lock(&versatile_lock); /* * This is really belt and braces; we hold unintended secondary * CPUs in the holding pen until we're ready for them. However, * since we haven't sent them a soft interrupt, they shouldn't * be there. */ versatile_write_cpu_release(cpu_logical_map(cpu)); /* * Send the secondary CPU a soft interrupt, thereby causing * the boot monitor to read the system wide flags register, * and branch to the address found there. */ arch_send_wakeup_ipi_mask(cpumask_of(cpu)); timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { smp_rmb(); if (versatile_cpu_release == -1) break; udelay(10); } /* * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ raw_spin_unlock(&versatile_lock); return versatile_cpu_release != -1 ? -ENOSYS : 0; }
linux-master
arch/arm/mach-versatile/platsmp.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Versatile board support using the device tree * * Copyright (C) 2010 Secret Lab Technologies Ltd. * Copyright (C) 2009 Jeremy Kerr <[email protected]> * Copyright (C) 2004 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd */ #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/amba/bus.h> #include <linux/amba/mmci.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> /* macro to get at MMIO space when running virtually */ #define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000) #define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n)) /* * ------------------------------------------------------------------------ * Versatile Registers * ------------------------------------------------------------------------ */ #define VERSATILE_SYS_PCICTL_OFFSET 0x44 #define VERSATILE_SYS_MCI_OFFSET 0x48 /* * VERSATILE peripheral addresses */ #define VERSATILE_MMCI0_BASE 0x10005000 /* MMC interface */ #define VERSATILE_MMCI1_BASE 0x1000B000 /* MMC Interface */ #define VERSATILE_SCTL_BASE 0x101E0000 /* System controller */ /* * System controller bit assignment */ #define VERSATILE_REFCLK 0 #define VERSATILE_TIMCLK 1 #define VERSATILE_TIMER1_EnSel 15 #define VERSATILE_TIMER2_EnSel 17 #define VERSATILE_TIMER3_EnSel 19 #define VERSATILE_TIMER4_EnSel 21 static void __iomem *versatile_sys_base; static unsigned int mmc_status(struct device *dev) { struct amba_device *adev = container_of(dev, struct amba_device, dev); u32 mask; if (adev->res.start == VERSATILE_MMCI0_BASE) mask = 1; else mask = 2; return readl(versatile_sys_base + VERSATILE_SYS_MCI_OFFSET) & mask; } static struct mmci_platform_data mmc0_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = mmc_status, }; static struct mmci_platform_data mmc1_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = mmc_status, }; /* * Lookup table for attaching a specific name and platform_data pointer to * devices as they get created by of_platform_populate(). Ideally this table * would not exist, but the current clock implementation depends on some devices * having a specific name. 
 */
struct of_dev_auxdata versatile_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("arm,primecell", VERSATILE_MMCI0_BASE,
		"fpga:05", &mmc0_plat_data),
	OF_DEV_AUXDATA("arm,primecell", VERSATILE_MMCI1_BASE,
		"fpga:0b", &mmc1_plat_data),
	{}
};

static struct map_desc versatile_io_desc[] __initdata __maybe_unused = {
	{
		.virtual	= IO_ADDRESS(VERSATILE_SCTL_BASE),
		.pfn		= __phys_to_pfn(VERSATILE_SCTL_BASE),
		.length		= SZ_4K * 9,
		.type		= MT_DEVICE
	}
};

static void __init versatile_map_io(void)
{
	debug_ll_io_init();
	iotable_init(versatile_io_desc, ARRAY_SIZE(versatile_io_desc));
}

static void __init versatile_init_early(void)
{
	u32 val;

	/*
	 * set clock frequency:
	 *	VERSATILE_REFCLK is 32KHz
	 *	VERSATILE_TIMCLK is 1MHz
	 */
	val = readl(__io_address(VERSATILE_SCTL_BASE));
	writel((VERSATILE_TIMCLK << VERSATILE_TIMER1_EnSel) |
	       (VERSATILE_TIMCLK << VERSATILE_TIMER2_EnSel) |
	       (VERSATILE_TIMCLK << VERSATILE_TIMER3_EnSel) |
	       (VERSATILE_TIMCLK << VERSATILE_TIMER4_EnSel) | val,
	       __io_address(VERSATILE_SCTL_BASE));
}

static void __init versatile_dt_pci_init(void)
{
	u32 val;
	struct device_node *np;
	struct property *newprop;

	np = of_find_compatible_node(NULL, NULL, "arm,versatile-pci");
	if (!np)
		return;

	/* Check if PCI backplane is detected */
	val = readl(versatile_sys_base + VERSATILE_SYS_PCICTL_OFFSET);
	if (val & 1) {
		/*
		 * Enable PCI accesses. Note that the documentation is
		 * inconsistent about whether or not this is needed, but
		 * the old driver had it so we will keep it.
		 */
		writel(1, versatile_sys_base + VERSATILE_SYS_PCICTL_OFFSET);
		goto out_put_node;
	}

	newprop = kzalloc(sizeof(*newprop), GFP_KERNEL);
	if (!newprop)
		goto out_put_node;

	newprop->name = kstrdup("status", GFP_KERNEL);
	newprop->value = kstrdup("disabled", GFP_KERNEL);
	newprop->length = sizeof("disabled");
	of_update_property(np, newprop);

	pr_info("Not plugged into PCI backplane!\n");

out_put_node:
	of_node_put(np);
}

static void __init versatile_dt_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "arm,core-module-versatile");
	if (np)
		versatile_sys_base = of_iomap(np, 0);
	WARN_ON(!versatile_sys_base);

	versatile_dt_pci_init();

	of_platform_default_populate(NULL, versatile_auxdata_lookup, NULL);
}

static const char *const versatile_dt_match[] __initconst = {
	"arm,versatile-ab",
	"arm,versatile-pb",
	NULL,
};

DT_MACHINE_START(VERSATILE_PB, "ARM-Versatile (Device Tree Support)")
	.map_io		= versatile_map_io,
	.init_early	= versatile_init_early,
	.init_machine	= versatile_dt_init,
	.dt_compat	= versatile_dt_match,
MACHINE_END
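/*
 * Illustrative sketch (standalone, not from the kernel tree): the
 * IO_ADDRESS() macro defined above compresses the scattered 0x1nnnnnnn
 * peripheral windows into the 0xFxxxxxxx virtual region. Two worked
 * examples using addresses from this file:
 */
#include <stdio.h>

#define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)

int main(void)
{
	/* MMCI0 at PA 0x10005000 maps to VA 0xf1005000 */
	printf("0x%08lx\n", (unsigned long)IO_ADDRESS(0x10005000));
	/* system controller at PA 0x101e0000 maps to VA 0xf11e0000 */
	printf("0x%08lx\n", (unsigned long)IO_ADDRESS(0x101e0000));
	return 0;
}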
linux-master
arch/arm/mach-versatile/versatile.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This hotplug implementation is _specific_ to the situation found on * ARM development platforms where there is _no_ possibility of actually * taking a CPU offline, resetting it, or otherwise. Real platforms must * NOT copy this code. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/smp_plat.h> #include <asm/cp15.h> #include "platsmp.h" static inline void versatile_immitation_enter_lowpower(unsigned int actrl_mask) { unsigned int v; asm volatile( "mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, %3\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C), "Ir" (actrl_mask) : "cc"); } static inline void versatile_immitation_leave_lowpower(unsigned int actrl_mask) { unsigned int v; asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C), "Ir" (actrl_mask) : "cc"); } static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious) { /* * there is no power-control hardware on this platform, so all * we can do is put the core into WFI; this is safe as the calling * code will have already disabled interrupts. * * This code should not be used outside Versatile platforms. */ for (;;) { wfi(); if (versatile_cpu_release == cpu_logical_map(cpu)) { /* * OK, proper wakeup, we're done */ break; } /* * Getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. */ (*spurious)++; } } /* * platform-specific code to shutdown a CPU. * This code supports immitation-style CPU hotplug for Versatile/Realview/ * Versatile Express platforms that are unable to do real CPU hotplug. */ void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask) { int spurious = 0; versatile_immitation_enter_lowpower(actrl_mask); versatile_immitation_do_lowpower(cpu, &spurious); versatile_immitation_leave_lowpower(actrl_mask); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); }
linux-master
arch/arm/mach-versatile/hotplug.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2000-2003 Deep Blue Solutions Ltd */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/syscore_ops.h> #include <linux/amba/bus.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/uaccess.h> #include <linux/termios.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "integrator-hardware.h" #include "integrator-cm.h" #include "integrator.h" /* Regmap to the AP system controller */ static struct regmap *ap_syscon_map; /* * All IO addresses are mapped onto VA 0xFFFx.xxxx, where x.xxxx * is the (PA >> 12). * * Setup a VA for the Integrator interrupt controller (for header #0, * just for now). */ #define VA_IC_BASE __io_address(INTEGRATOR_IC_BASE) /* * Logical Physical * f1400000 14000000 Interrupt controller * f1600000 16000000 UART 0 */ static struct map_desc ap_io_desc[] __initdata __maybe_unused = { { .virtual = IO_ADDRESS(INTEGRATOR_IC_BASE), .pfn = __phys_to_pfn(INTEGRATOR_IC_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = IO_ADDRESS(INTEGRATOR_UART0_BASE), .pfn = __phys_to_pfn(INTEGRATOR_UART0_BASE), .length = SZ_4K, .type = MT_DEVICE } }; static void __init ap_map_io(void) { iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc)); } #ifdef CONFIG_PM static unsigned long ic_irq_enable; static int irq_suspend(void) { ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); return 0; } static void irq_resume(void) { /* disable all irq sources */ cm_clear_irqs(); writel(-1, VA_IC_BASE + IRQ_ENABLE_CLEAR); writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR); writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET); } #else #define irq_suspend NULL #define irq_resume NULL #endif static struct syscore_ops irq_syscore_ops = { .suspend = irq_suspend, .resume = irq_resume, }; static int __init irq_syscore_init(void) { register_syscore_ops(&irq_syscore_ops); return 0; } device_initcall(irq_syscore_init); /* * For the PL010 found in the Integrator/AP some of the UART control is * implemented in the system controller and accessed using a callback * from the driver. 
 */
static void integrator_uart_set_mctrl(struct amba_device *dev,
				      void __iomem *base, unsigned int mctrl)
{
	unsigned int ctrls = 0, ctrlc = 0, rts_mask, dtr_mask;
	u32 phybase = dev->res.start;
	int ret;

	if (phybase == INTEGRATOR_UART0_BASE) {
		/* UART0 */
		rts_mask = 1 << 4;
		dtr_mask = 1 << 5;
	} else {
		/* UART1 */
		rts_mask = 1 << 6;
		dtr_mask = 1 << 7;
	}

	if (mctrl & TIOCM_RTS)
		ctrlc |= rts_mask;
	else
		ctrls |= rts_mask;

	if (mctrl & TIOCM_DTR)
		ctrlc |= dtr_mask;
	else
		ctrls |= dtr_mask;

	ret = regmap_write(ap_syscon_map,
			   INTEGRATOR_SC_CTRLS_OFFSET,
			   ctrls);
	if (ret)
		pr_err("MODEM: unable to write PL010 UART CTRLS\n");

	ret = regmap_write(ap_syscon_map,
			   INTEGRATOR_SC_CTRLC_OFFSET,
			   ctrlc);
	if (ret)
		pr_err("MODEM: unable to write PL010 UART CTRLC\n");
}

struct amba_pl010_data ap_uart_data = {
	.set_mctrl = integrator_uart_set_mctrl,
};

static void __init ap_init_irq_of(void)
{
	cm_init();
	irqchip_init();
}

/* For the Device Tree, add in the UART callbacks as AUXDATA */
static struct of_dev_auxdata ap_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART0_BASE,
		"uart0", &ap_uart_data),
	OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART1_BASE,
		"uart1", &ap_uart_data),
	{ /* sentinel */ },
};

static const struct of_device_id ap_syscon_match[] = {
	{ .compatible = "arm,integrator-ap-syscon"},
	{ },
};

static void __init ap_init_of(void)
{
	struct device_node *syscon;

	of_platform_default_populate(NULL, ap_auxdata_lookup, NULL);

	syscon = of_find_matching_node(NULL, ap_syscon_match);
	if (!syscon)
		return;
	ap_syscon_map = syscon_node_to_regmap(syscon);
	if (IS_ERR(ap_syscon_map)) {
		pr_crit("could not find Integrator/AP system controller\n");
		return;
	}
}

static const char * ap_dt_board_compat[] = {
	"arm,integrator-ap",
	NULL,
};

DT_MACHINE_START(INTEGRATOR_AP_DT, "ARM Integrator/AP (Device Tree)")
	.reserve	= integrator_reserve,
	.map_io		= ap_map_io,
	.init_irq	= ap_init_irq_of,
	.init_machine	= ap_init_of,
	.dt_compat	= ap_dt_board_compat,
MACHINE_END
linux-master
arch/arm/mach-versatile/integrator_ap.c
// SPDX-License-Identifier: GPL-2.0-or-later /* NetWinder Floating Point Emulator (c) Rebel.COM, 1998,1999 (c) Philip Blundell, 2001 Direct questions, comments to Scott Bambrough <[email protected]> */ #include "fpa11.h" #include "fpopcode.h" unsigned int SingleCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd); unsigned int DoubleCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd); unsigned int ExtendedCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd); unsigned int EmulateCPDO(const unsigned int opcode) { FPA11 *fpa11 = GET_FPA11(); FPREG *rFd; unsigned int nType, nDest, nRc; struct roundingData roundData; /* Get the destination size. If not valid let Linux perform an invalid instruction trap. */ nDest = getDestinationSize(opcode); if (typeNone == nDest) return 0; roundData.mode = SetRoundingMode(opcode); roundData.precision = SetRoundingPrecision(opcode); roundData.exception = 0; /* Compare the size of the operands in Fn and Fm. Choose the largest size and perform operations in that size, in order to make use of all the precision of the operands. If Fm is a constant, we just grab a constant of a size matching the size of the operand in Fn. */ if (MONADIC_INSTRUCTION(opcode)) nType = nDest; else nType = fpa11->fType[getFn(opcode)]; if (!CONSTANT_FM(opcode)) { register unsigned int Fm = getFm(opcode); if (nType < fpa11->fType[Fm]) { nType = fpa11->fType[Fm]; } } rFd = &fpa11->fpreg[getFd(opcode)]; switch (nType) { case typeSingle: nRc = SingleCPDO(&roundData, opcode, rFd); break; case typeDouble: nRc = DoubleCPDO(&roundData, opcode, rFd); break; #ifdef CONFIG_FPE_NWFPE_XP case typeExtended: nRc = ExtendedCPDO(&roundData, opcode, rFd); break; #endif default: nRc = 0; } /* The CPDO functions used to always set the destination type to be the same as their working size. */ if (nRc != 0) { /* If the operation succeeded, check to see if the result in the destination register is the correct size. If not force it to be. */ fpa11->fType[getFd(opcode)] = nDest; #ifdef CONFIG_FPE_NWFPE_XP if (nDest != nType) { switch (nDest) { case typeSingle: { if (typeDouble == nType) rFd->fSingle = float64_to_float32(&roundData, rFd->fDouble); else rFd->fSingle = floatx80_to_float32(&roundData, rFd->fExtended); } break; case typeDouble: { if (typeSingle == nType) rFd->fDouble = float32_to_float64(rFd->fSingle); else rFd->fDouble = floatx80_to_float64(&roundData, rFd->fExtended); } break; case typeExtended: { if (typeSingle == nType) rFd->fExtended = float32_to_floatx80(rFd->fSingle); else rFd->fExtended = float64_to_floatx80(rFd->fDouble); } break; } } #else if (nDest != nType) { if (nDest == typeSingle) rFd->fSingle = float64_to_float32(&roundData, rFd->fDouble); else rFd->fDouble = float32_to_float64(rFd->fSingle); } #endif } if (roundData.exception) float_raise(roundData.exception); return nRc; }
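/*
 * Illustrative sketch (standalone, not from the kernel tree): the operand
 * size promotion rule used by EmulateCPDO() above. Dyadic operations run
 * at the widest of the two operand precisions so no input bits are lost,
 * and the result is forced back to the size encoded in the opcode
 * afterwards. The enum values here are invented; the real type constants
 * live in fpa11.h and are assumed to be ordered, since the code compares
 * them with '<'.
 */
#include <stdio.h>

enum fptype { typeSingle = 1, typeDouble = 2, typeExtended = 3 };	/* widening order */

static enum fptype working_type(enum fptype fn, enum fptype fm)
{
	return fn < fm ? fm : fn;	/* pick the wider operand */
}

int main(void)
{
	/* prints "2" (typeDouble): single op double is computed in double */
	printf("%d\n", working_type(typeSingle, typeDouble));
	return 0;
}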
linux-master
arch/arm/nwfpe/fpa11_cpdo.c
// SPDX-License-Identifier: GPL-2.0-or-later /* NetWinder Floating Point Emulator (c) Rebel.COM, 1998,1999 (c) Philip Blundell, 1999, 2001 Direct questions, comments to Scott Bambrough <[email protected]> */ #include "fpa11.h" #include "fpopcode.h" #include "fpa11.inl" #include "fpmodule.h" #include "fpmodule.inl" #include "softfloat.h" unsigned int PerformFLT(const unsigned int opcode); unsigned int PerformFIX(const unsigned int opcode); static unsigned int PerformComparison(const unsigned int opcode); unsigned int EmulateCPRT(const unsigned int opcode) { if (opcode & 0x800000) { /* This is some variant of a comparison (PerformComparison will sort out which one). Since most of the other CPRT instructions are oddball cases of some sort or other it makes sense to pull this out into a fast path. */ return PerformComparison(opcode); } /* Hint to GCC that we'd like a jump table rather than a load of CMPs */ switch ((opcode & 0x700000) >> 20) { case FLT_CODE >> 20: return PerformFLT(opcode); break; case FIX_CODE >> 20: return PerformFIX(opcode); break; case WFS_CODE >> 20: writeFPSR(readRegister(getRd(opcode))); break; case RFS_CODE >> 20: writeRegister(getRd(opcode), readFPSR()); break; default: return 0; } return 1; } unsigned int PerformFLT(const unsigned int opcode) { FPA11 *fpa11 = GET_FPA11(); struct roundingData roundData; roundData.mode = SetRoundingMode(opcode); roundData.precision = SetRoundingPrecision(opcode); roundData.exception = 0; switch (opcode & MASK_ROUNDING_PRECISION) { case ROUND_SINGLE: { fpa11->fType[getFn(opcode)] = typeSingle; fpa11->fpreg[getFn(opcode)].fSingle = int32_to_float32(&roundData, readRegister(getRd(opcode))); } break; case ROUND_DOUBLE: { fpa11->fType[getFn(opcode)] = typeDouble; fpa11->fpreg[getFn(opcode)].fDouble = int32_to_float64(readRegister(getRd(opcode))); } break; #ifdef CONFIG_FPE_NWFPE_XP case ROUND_EXTENDED: { fpa11->fType[getFn(opcode)] = typeExtended; fpa11->fpreg[getFn(opcode)].fExtended = int32_to_floatx80(readRegister(getRd(opcode))); } break; #endif default: return 0; } if (roundData.exception) float_raise(roundData.exception); return 1; } unsigned int PerformFIX(const unsigned int opcode) { FPA11 *fpa11 = GET_FPA11(); unsigned int Fn = getFm(opcode); struct roundingData roundData; roundData.mode = SetRoundingMode(opcode); roundData.precision = SetRoundingPrecision(opcode); roundData.exception = 0; switch (fpa11->fType[Fn]) { case typeSingle: { writeRegister(getRd(opcode), float32_to_int32(&roundData, fpa11->fpreg[Fn].fSingle)); } break; case typeDouble: { writeRegister(getRd(opcode), float64_to_int32(&roundData, fpa11->fpreg[Fn].fDouble)); } break; #ifdef CONFIG_FPE_NWFPE_XP case typeExtended: { writeRegister(getRd(opcode), floatx80_to_int32(&roundData, fpa11->fpreg[Fn].fExtended)); } break; #endif default: return 0; } if (roundData.exception) float_raise(roundData.exception); return 1; } /* This instruction sets the flags N, Z, C, V in the FPSR. */ static unsigned int PerformComparison(const unsigned int opcode) { FPA11 *fpa11 = GET_FPA11(); unsigned int Fn = getFn(opcode), Fm = getFm(opcode); int e_flag = opcode & 0x400000; /* 1 if CxFE */ int n_flag = opcode & 0x200000; /* 1 if CNxx */ unsigned int flags = 0; #ifdef CONFIG_FPE_NWFPE_XP floatx80 rFn, rFm; /* Check for unordered condition and convert all operands to 80-bit format. ?? Might be some mileage in avoiding this conversion if possible. Eg, if both operands are 32-bit, detect this and do a 32-bit comparison (cheaper than an 80-bit one). 
*/ switch (fpa11->fType[Fn]) { case typeSingle: //printk("single.\n"); if (float32_is_nan(fpa11->fpreg[Fn].fSingle)) goto unordered; rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle); break; case typeDouble: //printk("double.\n"); if (float64_is_nan(fpa11->fpreg[Fn].fDouble)) goto unordered; rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble); break; case typeExtended: //printk("extended.\n"); if (floatx80_is_nan(fpa11->fpreg[Fn].fExtended)) goto unordered; rFn = fpa11->fpreg[Fn].fExtended; break; default: return 0; } if (CONSTANT_FM(opcode)) { //printk("Fm is a constant: #%d.\n",Fm); rFm = getExtendedConstant(Fm); if (floatx80_is_nan(rFm)) goto unordered; } else { //printk("Fm = r%d which contains a ",Fm); switch (fpa11->fType[Fm]) { case typeSingle: //printk("single.\n"); if (float32_is_nan(fpa11->fpreg[Fm].fSingle)) goto unordered; rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle); break; case typeDouble: //printk("double.\n"); if (float64_is_nan(fpa11->fpreg[Fm].fDouble)) goto unordered; rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble); break; case typeExtended: //printk("extended.\n"); if (floatx80_is_nan(fpa11->fpreg[Fm].fExtended)) goto unordered; rFm = fpa11->fpreg[Fm].fExtended; break; default: return 0; } } if (n_flag) rFm.high ^= 0x8000; /* test for less than condition */ if (floatx80_lt(rFn, rFm)) flags |= CC_NEGATIVE; /* test for equal condition */ if (floatx80_eq(rFn, rFm)) flags |= CC_ZERO; /* test for greater than or equal condition */ if (floatx80_lt(rFm, rFn)) flags |= CC_CARRY; #else if (CONSTANT_FM(opcode)) { /* Fm is a constant. Do the comparison in whatever precision Fn happens to be stored in. */ if (fpa11->fType[Fn] == typeSingle) { float32 rFm = getSingleConstant(Fm); float32 rFn = fpa11->fpreg[Fn].fSingle; if (float32_is_nan(rFn)) goto unordered; if (n_flag) rFm ^= 0x80000000; /* test for less than condition */ if (float32_lt_nocheck(rFn, rFm)) flags |= CC_NEGATIVE; /* test for equal condition */ if (float32_eq_nocheck(rFn, rFm)) flags |= CC_ZERO; /* test for greater than or equal condition */ if (float32_lt_nocheck(rFm, rFn)) flags |= CC_CARRY; } else { float64 rFm = getDoubleConstant(Fm); float64 rFn = fpa11->fpreg[Fn].fDouble; if (float64_is_nan(rFn)) goto unordered; if (n_flag) rFm ^= 0x8000000000000000ULL; /* test for less than condition */ if (float64_lt_nocheck(rFn, rFm)) flags |= CC_NEGATIVE; /* test for equal condition */ if (float64_eq_nocheck(rFn, rFm)) flags |= CC_ZERO; /* test for greater than or equal condition */ if (float64_lt_nocheck(rFm, rFn)) flags |= CC_CARRY; } } else { /* Both operands are in registers. */ if (fpa11->fType[Fn] == typeSingle && fpa11->fType[Fm] == typeSingle) { float32 rFm = fpa11->fpreg[Fm].fSingle; float32 rFn = fpa11->fpreg[Fn].fSingle; if (float32_is_nan(rFn) || float32_is_nan(rFm)) goto unordered; if (n_flag) rFm ^= 0x80000000; /* test for less than condition */ if (float32_lt_nocheck(rFn, rFm)) flags |= CC_NEGATIVE; /* test for equal condition */ if (float32_eq_nocheck(rFn, rFm)) flags |= CC_ZERO; /* test for greater than or equal condition */ if (float32_lt_nocheck(rFm, rFn)) flags |= CC_CARRY; } else { /* Promote 32-bit operand to 64 bits. */ float64 rFm, rFn; rFm = (fpa11->fType[Fm] == typeSingle) ? float32_to_float64(fpa11->fpreg[Fm].fSingle) : fpa11->fpreg[Fm].fDouble; rFn = (fpa11->fType[Fn] == typeSingle) ? 
float32_to_float64(fpa11->fpreg[Fn].fSingle) : fpa11->fpreg[Fn].fDouble; if (float64_is_nan(rFn) || float64_is_nan(rFm)) goto unordered; if (n_flag) rFm ^= 0x8000000000000000ULL; /* test for less than condition */ if (float64_lt_nocheck(rFn, rFm)) flags |= CC_NEGATIVE; /* test for equal condition */ if (float64_eq_nocheck(rFn, rFm)) flags |= CC_ZERO; /* test for greater than or equal condition */ if (float64_lt_nocheck(rFm, rFn)) flags |= CC_CARRY; } } #endif writeConditionCodes(flags); return 1; unordered: /* ?? The FPA data sheet is pretty vague about this, in particular about whether the non-E comparisons can ever raise exceptions. This implementation is based on a combination of what it says in the data sheet, observation of how the Acorn emulator actually behaves (and how programs expect it to) and guesswork. */ flags |= CC_OVERFLOW; flags &= ~(CC_ZERO | CC_NEGATIVE); if (BIT_AC & readFPSR()) flags |= CC_CARRY; if (e_flag) float_raise(float_flag_invalid); writeConditionCodes(flags); return 1; }
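/*
 * Illustrative sketch (standalone, not from the kernel tree): the NZCV
 * encoding produced by PerformComparison() above for the three ordered
 * outcomes: N for less than, Z for equal, C for greater than. Unordered
 * results set V instead (with C depending on the FPSR AC bit). The CC_*
 * bit positions below are placeholders, not the real kernel values.
 */
#include <stdio.h>

#define CC_NEGATIVE	(1u << 3)	/* placeholder bit positions */
#define CC_ZERO		(1u << 2)
#define CC_CARRY	(1u << 1)

static unsigned int compare_flags(double fn, double fm)
{
	unsigned int flags = 0;

	if (fn < fm)
		flags |= CC_NEGATIVE;	/* less than */
	if (fn == fm)
		flags |= CC_ZERO;	/* equal */
	if (fm < fn)
		flags |= CC_CARRY;	/* greater than */
	return flags;
}

int main(void)
{
	/* prints "8 4 2": less, equal, greater */
	printf("%u %u %u\n", compare_flags(1.0, 2.0),
	       compare_flags(2.0, 2.0), compare_flags(3.0, 2.0));
	return 0;
}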
linux-master
arch/arm/nwfpe/fpa11_cprt.c
// SPDX-License-Identifier: GPL-2.0-or-later /* NetWinder Floating Point Emulator (c) Rebel.COM, 1998,1999 Direct questions, comments to Scott Bambrough <[email protected]> */ #include "fpa11.h" #include "softfloat.h" #include "fpopcode.h" union float64_components { float64 f64; unsigned int i[2]; }; float64 float64_exp(float64 Fm); float64 float64_ln(float64 Fm); float64 float64_sin(float64 rFm); float64 float64_cos(float64 rFm); float64 float64_arcsin(float64 rFm); float64 float64_arctan(float64 rFm); float64 float64_log(float64 rFm); float64 float64_tan(float64 rFm); float64 float64_arccos(float64 rFm); float64 float64_pow(float64 rFn, float64 rFm); float64 float64_pol(float64 rFn, float64 rFm); static float64 float64_rsf(struct roundingData *roundData, float64 rFn, float64 rFm) { return float64_sub(roundData, rFm, rFn); } static float64 float64_rdv(struct roundingData *roundData, float64 rFn, float64 rFm) { return float64_div(roundData, rFm, rFn); } static float64 (*const dyadic_double[16])(struct roundingData*, float64 rFn, float64 rFm) = { [ADF_CODE >> 20] = float64_add, [MUF_CODE >> 20] = float64_mul, [SUF_CODE >> 20] = float64_sub, [RSF_CODE >> 20] = float64_rsf, [DVF_CODE >> 20] = float64_div, [RDF_CODE >> 20] = float64_rdv, [RMF_CODE >> 20] = float64_rem, /* strictly, these opcodes should not be implemented */ [FML_CODE >> 20] = float64_mul, [FDV_CODE >> 20] = float64_div, [FRD_CODE >> 20] = float64_rdv, }; static float64 float64_mvf(struct roundingData *roundData,float64 rFm) { return rFm; } static float64 float64_mnf(struct roundingData *roundData,float64 rFm) { union float64_components u; u.f64 = rFm; #ifdef __ARMEB__ u.i[0] ^= 0x80000000; #else u.i[1] ^= 0x80000000; #endif return u.f64; } static float64 float64_abs(struct roundingData *roundData,float64 rFm) { union float64_components u; u.f64 = rFm; #ifdef __ARMEB__ u.i[0] &= 0x7fffffff; #else u.i[1] &= 0x7fffffff; #endif return u.f64; } static float64 (*const monadic_double[16])(struct roundingData *, float64 rFm) = { [MVF_CODE >> 20] = float64_mvf, [MNF_CODE >> 20] = float64_mnf, [ABS_CODE >> 20] = float64_abs, [RND_CODE >> 20] = float64_round_to_int, [URD_CODE >> 20] = float64_round_to_int, [SQT_CODE >> 20] = float64_sqrt, [NRM_CODE >> 20] = float64_mvf, }; unsigned int DoubleCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd) { FPA11 *fpa11 = GET_FPA11(); float64 rFm; unsigned int Fm, opc_mask_shift; Fm = getFm(opcode); if (CONSTANT_FM(opcode)) { rFm = getDoubleConstant(Fm); } else { switch (fpa11->fType[Fm]) { case typeSingle: rFm = float32_to_float64(fpa11->fpreg[Fm].fSingle); break; case typeDouble: rFm = fpa11->fpreg[Fm].fDouble; break; default: return 0; } } opc_mask_shift = (opcode & MASK_ARITHMETIC_OPCODE) >> 20; if (!MONADIC_INSTRUCTION(opcode)) { unsigned int Fn = getFn(opcode); float64 rFn; switch (fpa11->fType[Fn]) { case typeSingle: rFn = float32_to_float64(fpa11->fpreg[Fn].fSingle); break; case typeDouble: rFn = fpa11->fpreg[Fn].fDouble; break; default: return 0; } if (dyadic_double[opc_mask_shift]) { rFd->fDouble = dyadic_double[opc_mask_shift](roundData, rFn, rFm); } else { return 0; } } else { if (monadic_double[opc_mask_shift]) { rFd->fDouble = monadic_double[opc_mask_shift](roundData, rFm); } else { return 0; } } return 1; }
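/*
 * Illustrative sketch (standalone, not from the kernel tree): the dispatch
 * pattern used by dyadic_double[]/monadic_double[] above. The arithmetic
 * opcode field is shifted down to a small index into a sparse table of
 * function pointers; NULL entries reject unimplemented opcodes. The opcode
 * encodings here are invented for illustration.
 */
#include <stdio.h>

#define OP_ADD	(0x0u << 20)	/* hypothetical opcode encodings */
#define OP_MUL	(0x1u << 20)
#define MASK_OP	(0xfu << 20)

static double op_add(double a, double b) { return a + b; }
static double op_mul(double a, double b) { return a * b; }

static double (*const dyadic[16])(double, double) = {
	[OP_ADD >> 20] = op_add,
	[OP_MUL >> 20] = op_mul,
};

int main(void)
{
	unsigned int opcode = OP_MUL | 0x123;	/* other fields ignored here */
	unsigned int idx = (opcode & MASK_OP) >> 20;

	if (dyadic[idx])
		printf("%g\n", dyadic[idx](6.0, 7.0));	/* prints "42" */
	return 0;
}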
linux-master
arch/arm/nwfpe/double_cpdo.c
/*
===============================================================================
This C source file is part of the SoftFloat IEC/IEEE Floating-point
Arithmetic Package, Release 2.

Written by John R. Hauser.  This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704.  Funding was partially provided by the
National Science Foundation under grant MIP-9311980.  The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
is available through the web page
http://www.jhauser.us/arithmetic/SoftFloat-2b/SoftFloat-source.txt

THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort
has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
TIMES RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO
PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.

Derivative works are acceptable, even for commercial purposes, so long as
(1) they include prominent notice that the work is derivative, and (2) they
include prominent notice akin to these three paragraphs for those parts of
this code that are retained.
===============================================================================
*/

#include <asm/div64.h>

#include "fpa11.h"
//#include "milieu.h"
//#include "softfloat.h"

/*
-------------------------------------------------------------------------------
Primitive arithmetic functions, including multi-word arithmetic, and
division and square root approximations.  (Can be specialized to target if
desired.)
-------------------------------------------------------------------------------
*/
#include "softfloat-macros"

/*
-------------------------------------------------------------------------------
Functions and definitions to determine:  (1) whether tininess for underflow
is detected before or after rounding by default, (2) what (if anything)
happens when exceptions are raised, (3) how signaling NaNs are distinguished
from quiet NaNs, (4) the default generated quiet NaNs, and (5) how NaNs
are propagated from function inputs to output.  These details are target-
specific.
-------------------------------------------------------------------------------
*/
#include "softfloat-specialize"

/*
-------------------------------------------------------------------------------
Takes a 64-bit fixed-point value `absZ' with binary point between bits 6
and 7, and returns the properly rounded 32-bit integer corresponding to the
input.  If `zSign' is nonzero, the input is negated before being converted
to an integer.  Bit 63 of `absZ' must be zero.  Ordinarily, the fixed-point
input is simply rounded to an integer, with the inexact exception raised if
the input cannot be represented exactly as an integer.  If the fixed-point
input is too large, however, the invalid exception is raised and the
largest positive or negative integer is returned.
-------------------------------------------------------------------------------
*/
static int32 roundAndPackInt32( struct roundingData *roundData, flag zSign, bits64 absZ )
{
    int8 roundingMode;
    flag roundNearestEven;
    int8 roundIncrement, roundBits;
    int32 z;

    roundingMode = roundData->mode;
    roundNearestEven = ( roundingMode == float_round_nearest_even );
    roundIncrement = 0x40;
    if ( ! roundNearestEven ) {
        if ( roundingMode == float_round_to_zero ) {
            roundIncrement = 0;
        }
        else {
            roundIncrement = 0x7F;
            if ( zSign ) {
                if ( roundingMode == float_round_up ) roundIncrement = 0;
            }
            else {
                if ( roundingMode == float_round_down ) roundIncrement = 0;
            }
        }
    }
    roundBits = absZ & 0x7F;
    absZ = ( absZ + roundIncrement )>>7;
    absZ &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven );
    z = absZ;
    if ( zSign ) z = - z;
    if ( ( absZ>>32 ) || ( z && ( ( z < 0 ) ^ zSign ) ) ) {
        roundData->exception |= float_flag_invalid;
        return zSign ? 0x80000000 : 0x7FFFFFFF;
    }
    if ( roundBits ) roundData->exception |= float_flag_inexact;
    return z;
}

/*
-------------------------------------------------------------------------------
Returns the fraction bits of the single-precision floating-point value `a'.
-------------------------------------------------------------------------------
*/
INLINE bits32 extractFloat32Frac( float32 a )
{
    return a & 0x007FFFFF;
}

/*
-------------------------------------------------------------------------------
Returns the exponent bits of the single-precision floating-point value `a'.
-------------------------------------------------------------------------------
*/
INLINE int16 extractFloat32Exp( float32 a )
{
    return ( a>>23 ) & 0xFF;
}

/*
-------------------------------------------------------------------------------
Returns the sign bit of the single-precision floating-point value `a'.
-------------------------------------------------------------------------------
*/
#if 0	/* in softfloat.h */
INLINE flag extractFloat32Sign( float32 a )
{
    return a>>31;
}
#endif

/*
-------------------------------------------------------------------------------
Normalizes the subnormal single-precision floating-point value represented
by the denormalized significand `aSig'.  The normalized exponent and
significand are stored at the locations pointed to by `zExpPtr' and
`zSigPtr', respectively.
-------------------------------------------------------------------------------
*/
static void normalizeFloat32Subnormal( bits32 aSig, int16 *zExpPtr, bits32 *zSigPtr )
{
    int8 shiftCount;

    shiftCount = countLeadingZeros32( aSig ) - 8;
    *zSigPtr = aSig<<shiftCount;
    *zExpPtr = 1 - shiftCount;
}

/*
-------------------------------------------------------------------------------
Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
single-precision floating-point value, returning the result.  After being
shifted into the proper positions, the three fields are simply added
together to form the result.  This means that any integer portion of `zSig'
will be added into the exponent.  Since a properly normalized significand
will have an integer portion equal to 1, the `zExp' input should be 1 less
than the desired result exponent whenever `zSig' is a complete, normalized
significand.
-------------------------------------------------------------------------------
*/
INLINE float32 packFloat32( flag zSign, int16 zExp, bits32 zSig )
{
#if 0
    float32 f;
    __asm__("@ packFloat32				\n\
	    mov %0, %1, asl #31			\n\
	    orr %0, %2, asl #23			\n\
	    orr %0, %3"
	    : /* no outputs */
	    : "g" (f), "g" (zSign), "g" (zExp), "g" (zSig)
	    : "cc");
    return f;
#else
    return ( ( (bits32) zSign )<<31 ) + ( ( (bits32) zExp )<<23 ) + zSig;
#endif
}

/*
-------------------------------------------------------------------------------
Takes an abstract floating-point value having sign `zSign', exponent `zExp',
and significand `zSig', and returns the proper single-precision floating-
point value corresponding to the abstract input.
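(Editor's aside, not part of the original SoftFloat text: the 7-bit
round-to-nearest-even step used here and in roundAndPackInt32 above can be
checked by hand.  An unrounded value of 0x1C0 (3.5 in units of 1<<7)
becomes ( 0x1C0 + 0x40 )>>7 = 4, and the tie-clearing mask leaves it alone
since bit 0 is already even; 0x140 (2.5) becomes ( 0x140 + 0x40 )>>7 = 3,
whose low bit is then cleared to give the even neighbour 2.  A minimal
host-side sketch of the same step, with hypothetical C99 names:

    #include <stdint.h>
    static uint32_t round_nearest_even_7( uint64_t absZ )
    {
        uint64_t roundBits = absZ & 0x7F;          // bits below the point
        absZ = ( absZ + 0x40 )>>7;                 // add half, truncate
        absZ &= ~ (uint64_t) ( ( roundBits ^ 0x40 ) == 0 );  // ties to even
        return (uint32_t) absZ;
    }
)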
Ordinarily, the abstract value is simply rounded and packed into the single-precision format, with the inexact exception raised if the abstract input cannot be represented exactly. If the abstract value is too large, however, the overflow and inexact exceptions are raised and an infinity or maximal finite value is returned. If the abstract value is too small, the input value is rounded to a subnormal number, and the underflow and inexact exceptions are raised if the abstract input cannot be represented exactly as a subnormal single- precision floating-point number. The input significand `zSig' has its binary point between bits 30 and 29, which is 7 bits to the left of the usual location. This shifted significand must be normalized or smaller. If `zSig' is not normalized, `zExp' must be 0; in that case, the result returned is a subnormal number, and it must not require rounding. In the usual case that `zSig' is normalized, `zExp' must be 1 less than the ``true'' floating-point exponent. The handling of underflow and overflow follows the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ static float32 roundAndPackFloat32( struct roundingData *roundData, flag zSign, int16 zExp, bits32 zSig ) { int8 roundingMode; flag roundNearestEven; int8 roundIncrement, roundBits; flag isTiny; roundingMode = roundData->mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); roundIncrement = 0x40; if ( ! roundNearestEven ) { if ( roundingMode == float_round_to_zero ) { roundIncrement = 0; } else { roundIncrement = 0x7F; if ( zSign ) { if ( roundingMode == float_round_up ) roundIncrement = 0; } else { if ( roundingMode == float_round_down ) roundIncrement = 0; } } } roundBits = zSig & 0x7F; if ( 0xFD <= (bits16) zExp ) { if ( ( 0xFD < zExp ) || ( ( zExp == 0xFD ) && ( (sbits32) ( zSig + roundIncrement ) < 0 ) ) ) { roundData->exception |= float_flag_overflow | float_flag_inexact; return packFloat32( zSign, 0xFF, 0 ) - ( roundIncrement == 0 ); } if ( zExp < 0 ) { isTiny = ( float_detect_tininess == float_tininess_before_rounding ) || ( zExp < -1 ) || ( zSig + roundIncrement < 0x80000000 ); shift32RightJamming( zSig, - zExp, &zSig ); zExp = 0; roundBits = zSig & 0x7F; if ( isTiny && roundBits ) roundData->exception |= float_flag_underflow; } } if ( roundBits ) roundData->exception |= float_flag_inexact; zSig = ( zSig + roundIncrement )>>7; zSig &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); if ( zSig == 0 ) zExp = 0; return packFloat32( zSign, zExp, zSig ); } /* ------------------------------------------------------------------------------- Takes an abstract floating-point value having sign `zSign', exponent `zExp', and significand `zSig', and returns the proper single-precision floating- point value corresponding to the abstract input. This routine is just like `roundAndPackFloat32' except that `zSig' does not have to be normalized in any way. In all cases, `zExp' must be 1 less than the ``true'' floating- point exponent. 
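(Editor's sketch, not part of the original text: the normalization step
just left-justifies `zSig' so its leading 1 lands in bit 30 and lowers
`zExp' by the same shift.  A host-side equivalent under that assumption,
using the GCC builtin in place of countLeadingZeros32 and requiring a
nonzero significand:

    #include <stdint.h>
    static void normalize_to_bit30( uint32_t sig, int exp,
                                    uint32_t *zSig, int *zExp )
    {
        int shiftCount = __builtin_clz( sig ) - 1;  // sig != 0 assumed
        *zSig = sig<<shiftCount;
        *zExp = exp - shiftCount;
    }
)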
------------------------------------------------------------------------------- */ static float32 normalizeRoundAndPackFloat32( struct roundingData *roundData, flag zSign, int16 zExp, bits32 zSig ) { int8 shiftCount; shiftCount = countLeadingZeros32( zSig ) - 1; return roundAndPackFloat32( roundData, zSign, zExp - shiftCount, zSig<<shiftCount ); } /* ------------------------------------------------------------------------------- Returns the fraction bits of the double-precision floating-point value `a'. ------------------------------------------------------------------------------- */ INLINE bits64 extractFloat64Frac( float64 a ) { return a & LIT64( 0x000FFFFFFFFFFFFF ); } /* ------------------------------------------------------------------------------- Returns the exponent bits of the double-precision floating-point value `a'. ------------------------------------------------------------------------------- */ INLINE int16 extractFloat64Exp( float64 a ) { return ( a>>52 ) & 0x7FF; } /* ------------------------------------------------------------------------------- Returns the sign bit of the double-precision floating-point value `a'. ------------------------------------------------------------------------------- */ #if 0 /* in softfloat.h */ INLINE flag extractFloat64Sign( float64 a ) { return a>>63; } #endif /* ------------------------------------------------------------------------------- Normalizes the subnormal double-precision floating-point value represented by the denormalized significand `aSig'. The normalized exponent and significand are stored at the locations pointed to by `zExpPtr' and `zSigPtr', respectively. ------------------------------------------------------------------------------- */ static void normalizeFloat64Subnormal( bits64 aSig, int16 *zExpPtr, bits64 *zSigPtr ) { int8 shiftCount; shiftCount = countLeadingZeros64( aSig ) - 11; *zSigPtr = aSig<<shiftCount; *zExpPtr = 1 - shiftCount; } /* ------------------------------------------------------------------------------- Packs the sign `zSign', exponent `zExp', and significand `zSig' into a double-precision floating-point value, returning the result. After being shifted into the proper positions, the three fields are simply added together to form the result. This means that any integer portion of `zSig' will be added into the exponent. Since a properly normalized significand will have an integer portion equal to 1, the `zExp' input should be 1 less than the desired result exponent whenever `zSig' is a complete, normalized significand. ------------------------------------------------------------------------------- */ INLINE float64 packFloat64( flag zSign, int16 zExp, bits64 zSig ) { return ( ( (bits64) zSign )<<63 ) + ( ( (bits64) zExp )<<52 ) + zSig; } /* ------------------------------------------------------------------------------- Takes an abstract floating-point value having sign `zSign', exponent `zExp', and significand `zSig', and returns the proper double-precision floating- point value corresponding to the abstract input. Ordinarily, the abstract value is simply rounded and packed into the double-precision format, with the inexact exception raised if the abstract input cannot be represented exactly. If the abstract value is too large, however, the overflow and inexact exceptions are raised and an infinity or maximal finite value is returned. 
If the abstract value is too small, the input value is rounded to a
subnormal number, and the underflow and inexact exceptions are raised if
the abstract input cannot be represented exactly as a subnormal double-
precision floating-point number.  The input significand `zSig' has its
binary point between bits 62 and 61, which is 10 bits to the left of the
usual location.  This shifted significand must be normalized or smaller.
If `zSig' is not normalized, `zExp' must be 0; in that case, the result
returned is a subnormal number, and it must not require rounding.  In the
usual case that `zSig' is normalized, `zExp' must be 1 less than the
``true'' floating-point exponent.  The handling of underflow and overflow
follows the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
static float64 roundAndPackFloat64( struct roundingData *roundData, flag zSign, int16 zExp, bits64 zSig )
{
    int8 roundingMode;
    flag roundNearestEven;
    int16 roundIncrement, roundBits;
    flag isTiny;

    roundingMode = roundData->mode;
    roundNearestEven = ( roundingMode == float_round_nearest_even );
    roundIncrement = 0x200;
    if ( ! roundNearestEven ) {
        if ( roundingMode == float_round_to_zero ) {
            roundIncrement = 0;
        }
        else {
            roundIncrement = 0x3FF;
            if ( zSign ) {
                if ( roundingMode == float_round_up ) roundIncrement = 0;
            }
            else {
                if ( roundingMode == float_round_down ) roundIncrement = 0;
            }
        }
    }
    roundBits = zSig & 0x3FF;
    if ( 0x7FD <= (bits16) zExp ) {
        if (    ( 0x7FD < zExp )
             || (    ( zExp == 0x7FD )
                  && ( (sbits64) ( zSig + roundIncrement ) < 0 ) )
           ) {
            //register int lr = __builtin_return_address(0);
            //printk("roundAndPackFloat64 called from 0x%08x\n",lr);
            roundData->exception |= float_flag_overflow | float_flag_inexact;
            return packFloat64( zSign, 0x7FF, 0 ) - ( roundIncrement == 0 );
        }
        if ( zExp < 0 ) {
            isTiny =
                   ( float_detect_tininess == float_tininess_before_rounding )
                || ( zExp < -1 )
                || ( zSig + roundIncrement < LIT64( 0x8000000000000000 ) );
            shift64RightJamming( zSig, - zExp, &zSig );
            zExp = 0;
            roundBits = zSig & 0x3FF;
            if ( isTiny && roundBits ) roundData->exception |= float_flag_underflow;
        }
    }
    if ( roundBits ) roundData->exception |= float_flag_inexact;
    zSig = ( zSig + roundIncrement )>>10;
    zSig &= ~ ( ( ( roundBits ^ 0x200 ) == 0 ) & roundNearestEven );
    if ( zSig == 0 ) zExp = 0;
    return packFloat64( zSign, zExp, zSig );
}

/*
-------------------------------------------------------------------------------
Takes an abstract floating-point value having sign `zSign', exponent `zExp',
and significand `zSig', and returns the proper double-precision floating-
point value corresponding to the abstract input.  This routine is just like
`roundAndPackFloat64' except that `zSig' does not have to be normalized in
any way.  In all cases, `zExp' must be 1 less than the ``true'' floating-
point exponent.
-------------------------------------------------------------------------------
*/
static float64 normalizeRoundAndPackFloat64( struct roundingData *roundData, flag zSign, int16 zExp, bits64 zSig )
{
    int8 shiftCount;

    shiftCount = countLeadingZeros64( zSig ) - 1;
    return roundAndPackFloat64( roundData, zSign, zExp - shiftCount, zSig<<shiftCount );
}

#ifdef FLOATX80

/*
-------------------------------------------------------------------------------
Returns the fraction bits of the extended double-precision floating-point
value `a'.
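(Editor's note, not part of the original text: unlike float32 and float64,
the extended format keeps its full 64-bit significand in `low', with the
integer bit explicit at bit 63, and packs the sign and 15-bit exponent
into `high'.  A field-split sketch with hypothetical names:

    #include <stdint.h>
    struct x80 { uint64_t low; uint16_t high; };
    static int      x80_sign( struct x80 a ) { return a.high>>15; }
    static int32_t  x80_exp( struct x80 a )  { return a.high & 0x7FFF; }
    static uint64_t x80_frac( struct x80 a ) { return a.low; }
)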
------------------------------------------------------------------------------- */ INLINE bits64 extractFloatx80Frac( floatx80 a ) { return a.low; } /* ------------------------------------------------------------------------------- Returns the exponent bits of the extended double-precision floating-point value `a'. ------------------------------------------------------------------------------- */ INLINE int32 extractFloatx80Exp( floatx80 a ) { return a.high & 0x7FFF; } /* ------------------------------------------------------------------------------- Returns the sign bit of the extended double-precision floating-point value `a'. ------------------------------------------------------------------------------- */ INLINE flag extractFloatx80Sign( floatx80 a ) { return a.high>>15; } /* ------------------------------------------------------------------------------- Normalizes the subnormal extended double-precision floating-point value represented by the denormalized significand `aSig'. The normalized exponent and significand are stored at the locations pointed to by `zExpPtr' and `zSigPtr', respectively. ------------------------------------------------------------------------------- */ static void normalizeFloatx80Subnormal( bits64 aSig, int32 *zExpPtr, bits64 *zSigPtr ) { int8 shiftCount; shiftCount = countLeadingZeros64( aSig ); *zSigPtr = aSig<<shiftCount; *zExpPtr = 1 - shiftCount; } /* ------------------------------------------------------------------------------- Packs the sign `zSign', exponent `zExp', and significand `zSig' into an extended double-precision floating-point value, returning the result. ------------------------------------------------------------------------------- */ INLINE floatx80 packFloatx80( flag zSign, int32 zExp, bits64 zSig ) { floatx80 z; z.low = zSig; z.high = ( ( (bits16) zSign )<<15 ) + zExp; z.__padding = 0; return z; } /* ------------------------------------------------------------------------------- Takes an abstract floating-point value having sign `zSign', exponent `zExp', and extended significand formed by the concatenation of `zSig0' and `zSig1', and returns the proper extended double-precision floating-point value corresponding to the abstract input. Ordinarily, the abstract value is rounded and packed into the extended double-precision format, with the inexact exception raised if the abstract input cannot be represented exactly. If the abstract value is too large, however, the overflow and inexact exceptions are raised and an infinity or maximal finite value is returned. If the abstract value is too small, the input value is rounded to a subnormal number, and the underflow and inexact exceptions are raised if the abstract input cannot be represented exactly as a subnormal extended double-precision floating-point number. If `roundingPrecision' is 32 or 64, the result is rounded to the same number of bits as single or double precision, respectively. Otherwise, the result is rounded to the full precision of the extended double-precision format. The input significand must be normalized or smaller. If the input significand is not normalized, `zExp' must be 0; in that case, the result returned is a subnormal number, and it must not require rounding. The handling of underflow and overflow follows the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
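(Editor's worked example, not part of the original text: for
`roundingPrecision' 32 the rounding happens 40 bits up from the bottom of
the 64-bit significand, so the increment is 1<<39 and the mask keeps the
low 40 bits; for 64 it is 1<<10 with an 11-bit mask.  These are exactly
the LIT64 constants below.  Sketch with hypothetical names:

    #include <stdint.h>
    static void prec_masks( int prec, uint64_t *inc, uint64_t *mask )
    {
        if ( prec == 64 )      { *inc = 1ULL<<10; *mask = ( 1ULL<<11 ) - 1; }
        else if ( prec == 32 ) { *inc = 1ULL<<39; *mask = ( 1ULL<<40 ) - 1; }
        else                   { *inc = 0; *mask = 0; }  // full 80-bit path
    }
)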
------------------------------------------------------------------------------- */ static floatx80 roundAndPackFloatx80( struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 ) { int8 roundingMode, roundingPrecision; flag roundNearestEven, increment, isTiny; int64 roundIncrement, roundMask, roundBits; roundingMode = roundData->mode; roundingPrecision = roundData->precision; roundNearestEven = ( roundingMode == float_round_nearest_even ); if ( roundingPrecision == 80 ) goto precision80; if ( roundingPrecision == 64 ) { roundIncrement = LIT64( 0x0000000000000400 ); roundMask = LIT64( 0x00000000000007FF ); } else if ( roundingPrecision == 32 ) { roundIncrement = LIT64( 0x0000008000000000 ); roundMask = LIT64( 0x000000FFFFFFFFFF ); } else { goto precision80; } zSig0 |= ( zSig1 != 0 ); if ( ! roundNearestEven ) { if ( roundingMode == float_round_to_zero ) { roundIncrement = 0; } else { roundIncrement = roundMask; if ( zSign ) { if ( roundingMode == float_round_up ) roundIncrement = 0; } else { if ( roundingMode == float_round_down ) roundIncrement = 0; } } } roundBits = zSig0 & roundMask; if ( 0x7FFD <= (bits32) ( zExp - 1 ) ) { if ( ( 0x7FFE < zExp ) || ( ( zExp == 0x7FFE ) && ( zSig0 + roundIncrement < zSig0 ) ) ) { goto overflow; } if ( zExp <= 0 ) { isTiny = ( float_detect_tininess == float_tininess_before_rounding ) || ( zExp < 0 ) || ( zSig0 <= zSig0 + roundIncrement ); shift64RightJamming( zSig0, 1 - zExp, &zSig0 ); zExp = 0; roundBits = zSig0 & roundMask; if ( isTiny && roundBits ) roundData->exception |= float_flag_underflow; if ( roundBits ) roundData->exception |= float_flag_inexact; zSig0 += roundIncrement; if ( (sbits64) zSig0 < 0 ) zExp = 1; roundIncrement = roundMask + 1; if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) { roundMask |= roundIncrement; } zSig0 &= ~ roundMask; return packFloatx80( zSign, zExp, zSig0 ); } } if ( roundBits ) roundData->exception |= float_flag_inexact; zSig0 += roundIncrement; if ( zSig0 < roundIncrement ) { ++zExp; zSig0 = LIT64( 0x8000000000000000 ); } roundIncrement = roundMask + 1; if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) { roundMask |= roundIncrement; } zSig0 &= ~ roundMask; if ( zSig0 == 0 ) zExp = 0; return packFloatx80( zSign, zExp, zSig0 ); precision80: increment = ( (sbits64) zSig1 < 0 ); if ( ! roundNearestEven ) { if ( roundingMode == float_round_to_zero ) { increment = 0; } else { if ( zSign ) { increment = ( roundingMode == float_round_down ) && zSig1; } else { increment = ( roundingMode == float_round_up ) && zSig1; } } } if ( 0x7FFD <= (bits32) ( zExp - 1 ) ) { if ( ( 0x7FFE < zExp ) || ( ( zExp == 0x7FFE ) && ( zSig0 == LIT64( 0xFFFFFFFFFFFFFFFF ) ) && increment ) ) { roundMask = 0; overflow: roundData->exception |= float_flag_overflow | float_flag_inexact; if ( ( roundingMode == float_round_to_zero ) || ( zSign && ( roundingMode == float_round_up ) ) || ( ! zSign && ( roundingMode == float_round_down ) ) ) { return packFloatx80( zSign, 0x7FFE, ~ roundMask ); } return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); } if ( zExp <= 0 ) { isTiny = ( float_detect_tininess == float_tininess_before_rounding ) || ( zExp < 0 ) || ! 
increment || ( zSig0 < LIT64( 0xFFFFFFFFFFFFFFFF ) ); shift64ExtraRightJamming( zSig0, zSig1, 1 - zExp, &zSig0, &zSig1 ); zExp = 0; if ( isTiny && zSig1 ) roundData->exception |= float_flag_underflow; if ( zSig1 ) roundData->exception |= float_flag_inexact; if ( roundNearestEven ) { increment = ( (sbits64) zSig1 < 0 ); } else { if ( zSign ) { increment = ( roundingMode == float_round_down ) && zSig1; } else { increment = ( roundingMode == float_round_up ) && zSig1; } } if ( increment ) { ++zSig0; zSig0 &= ~ ( ( zSig1 + zSig1 == 0 ) & roundNearestEven ); if ( (sbits64) zSig0 < 0 ) zExp = 1; } return packFloatx80( zSign, zExp, zSig0 ); } } if ( zSig1 ) roundData->exception |= float_flag_inexact; if ( increment ) { ++zSig0; if ( zSig0 == 0 ) { ++zExp; zSig0 = LIT64( 0x8000000000000000 ); } else { zSig0 &= ~ ( ( zSig1 + zSig1 == 0 ) & roundNearestEven ); } } else { if ( zSig0 == 0 ) zExp = 0; } return packFloatx80( zSign, zExp, zSig0 ); } /* ------------------------------------------------------------------------------- Takes an abstract floating-point value having sign `zSign', exponent `zExp', and significand formed by the concatenation of `zSig0' and `zSig1', and returns the proper extended double-precision floating-point value corresponding to the abstract input. This routine is just like `roundAndPackFloatx80' except that the input significand does not have to be normalized. ------------------------------------------------------------------------------- */ static floatx80 normalizeRoundAndPackFloatx80( struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 ) { int8 shiftCount; if ( zSig0 == 0 ) { zSig0 = zSig1; zSig1 = 0; zExp -= 64; } shiftCount = countLeadingZeros64( zSig0 ); shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); zExp -= shiftCount; return roundAndPackFloatx80( roundData, zSign, zExp, zSig0, zSig1 ); } #endif /* ------------------------------------------------------------------------------- Returns the result of converting the 32-bit two's complement integer `a' to the single-precision floating-point format. The conversion is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float32 int32_to_float32(struct roundingData *roundData, int32 a) { flag zSign; if ( a == 0 ) return 0; if ( a == 0x80000000 ) return packFloat32( 1, 0x9E, 0 ); zSign = ( a < 0 ); return normalizeRoundAndPackFloat32( roundData, zSign, 0x9C, zSign ? - a : a ); } /* ------------------------------------------------------------------------------- Returns the result of converting the 32-bit two's complement integer `a' to the double-precision floating-point format. The conversion is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float64 int32_to_float64( int32 a ) { flag aSign; uint32 absA; int8 shiftCount; bits64 zSig; if ( a == 0 ) return 0; aSign = ( a < 0 ); absA = aSign ? - a : a; shiftCount = countLeadingZeros32( absA ) + 21; zSig = absA; return packFloat64( aSign, 0x432 - shiftCount, zSig<<shiftCount ); } #ifdef FLOATX80 /* ------------------------------------------------------------------------------- Returns the result of converting the 32-bit two's complement integer `a' to the extended double-precision floating-point format. The conversion is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
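(Editor's note, not part of the original text: every 32-bit integer fits
in the 53-bit double significand, and in the 64-bit extended one, so
int32_to_float64 and this routine are exact and take no rounding data;
only int32_to_float32 above can round, and it also special-cases
0x80000000 so that negating the most negative integer is never attempted.)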
-------------------------------------------------------------------------------
*/
floatx80 int32_to_floatx80( int32 a )
{
    flag zSign;
    uint32 absA;
    int8 shiftCount;
    bits64 zSig;

    if ( a == 0 ) return packFloatx80( 0, 0, 0 );
    zSign = ( a < 0 );
    absA = zSign ? - a : a;
    shiftCount = countLeadingZeros32( absA ) + 32;
    zSig = absA;
    return packFloatx80( zSign, 0x403E - shiftCount, zSig<<shiftCount );
}

#endif

/*
-------------------------------------------------------------------------------
Returns the result of converting the single-precision floating-point value
`a' to the 32-bit two's complement integer format.  The conversion is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic---which means in particular that the conversion is rounded
according to the current rounding mode.  If `a' is a NaN, the largest
positive integer is returned.  Otherwise, if the conversion overflows, the
largest integer with the same sign as `a' is returned.
-------------------------------------------------------------------------------
*/
int32 float32_to_int32( struct roundingData *roundData, float32 a )
{
    flag aSign;
    int16 aExp, shiftCount;
    bits32 aSig;
    bits64 zSig;

    aSig = extractFloat32Frac( a );
    aExp = extractFloat32Exp( a );
    aSign = extractFloat32Sign( a );
    /* single-precision NaNs have exponent 0xFF; the 0x7FF previously
       tested here is the double-precision value and can never match a
       float32 exponent, so NaNs were wrongly converted as negative */
    if ( ( aExp == 0xFF ) && aSig ) aSign = 0;
    if ( aExp ) aSig |= 0x00800000;
    shiftCount = 0xAF - aExp;
    zSig = aSig;
    zSig <<= 32;
    if ( 0 < shiftCount ) shift64RightJamming( zSig, shiftCount, &zSig );
    return roundAndPackInt32( roundData, aSign, zSig );
}

/*
-------------------------------------------------------------------------------
Returns the result of converting the single-precision floating-point value
`a' to the 32-bit two's complement integer format.  The conversion is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic, except that the conversion is always rounded toward zero.  If
`a' is a NaN, the largest positive integer is returned.  Otherwise, if the
conversion overflows, the largest integer with the same sign as `a' is
returned.
-------------------------------------------------------------------------------
*/
int32 float32_to_int32_round_to_zero( float32 a )
{
    flag aSign;
    int16 aExp, shiftCount;
    bits32 aSig;
    int32 z;

    aSig = extractFloat32Frac( a );
    aExp = extractFloat32Exp( a );
    aSign = extractFloat32Sign( a );
    shiftCount = aExp - 0x9E;
    if ( 0 <= shiftCount ) {
        if ( a == 0xCF000000 ) return 0x80000000;
        float_raise( float_flag_invalid );
        if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) return 0x7FFFFFFF;
        return 0x80000000;
    }
    else if ( aExp <= 0x7E ) {
        if ( aExp | aSig ) float_raise( float_flag_inexact );
        return 0;
    }
    aSig = ( aSig | 0x00800000 )<<8;
    z = aSig>>( - shiftCount );
    if ( (bits32) ( aSig<<( shiftCount & 31 ) ) ) {
        float_raise( float_flag_inexact );
    }
    return aSign ? - z : z;
}

/*
-------------------------------------------------------------------------------
Returns the result of converting the single-precision floating-point value
`a' to the double-precision floating-point format.  The conversion is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic.
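(Editor's sketch, not part of the original text: this widening is exact,
so no rounding data is needed; the fraction gains 52 - 23 = 29 zero bits
and the exponent bias moves from 127 to 1023, a rebias of 0x380.  A
host-side equivalent for normal inputs only, hypothetical names:

    #include <stdint.h>
    static uint64_t widen_normal( uint32_t f )  // f must be a normal float32
    {
        uint64_t sign = f>>31;
        uint64_t exp  = ( f>>23 ) & 0xFF;
        uint64_t frac = f & 0x007FFFFF;
        return ( sign<<63 ) | ( ( exp + 0x380 )<<52 ) | ( frac<<29 );
    }
)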
------------------------------------------------------------------------------- */ float64 float32_to_float64( float32 a ) { flag aSign; int16 aExp; bits32 aSig; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { if ( aSig ) return commonNaNToFloat64( float32ToCommonNaN( a ) ); return packFloat64( aSign, 0x7FF, 0 ); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloat64( aSign, 0, 0 ); normalizeFloat32Subnormal( aSig, &aExp, &aSig ); --aExp; } return packFloat64( aSign, aExp + 0x380, ( (bits64) aSig )<<29 ); } #ifdef FLOATX80 /* ------------------------------------------------------------------------------- Returns the result of converting the single-precision floating-point value `a' to the extended double-precision floating-point format. The conversion is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ floatx80 float32_to_floatx80( float32 a ) { flag aSign; int16 aExp; bits32 aSig; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { if ( aSig ) return commonNaNToFloatx80( float32ToCommonNaN( a ) ); return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } aSig |= 0x00800000; return packFloatx80( aSign, aExp + 0x3F80, ( (bits64) aSig )<<40 ); } #endif /* ------------------------------------------------------------------------------- Rounds the single-precision floating-point value `a' to an integer, and returns the result as a single-precision floating-point value. The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float32 float32_round_to_int( struct roundingData *roundData, float32 a ) { flag aSign; int16 aExp; bits32 lastBitMask, roundBitsMask; int8 roundingMode; float32 z; aExp = extractFloat32Exp( a ); if ( 0x96 <= aExp ) { if ( ( aExp == 0xFF ) && extractFloat32Frac( a ) ) { return propagateFloat32NaN( a, a ); } return a; } roundingMode = roundData->mode; if ( aExp <= 0x7E ) { if ( (bits32) ( a<<1 ) == 0 ) return a; roundData->exception |= float_flag_inexact; aSign = extractFloat32Sign( a ); switch ( roundingMode ) { case float_round_nearest_even: if ( ( aExp == 0x7E ) && extractFloat32Frac( a ) ) { return packFloat32( aSign, 0x7F, 0 ); } break; case float_round_down: return aSign ? 0xBF800000 : 0; case float_round_up: return aSign ? 0x80000000 : 0x3F800000; } return packFloat32( aSign, 0, 0 ); } lastBitMask = 1; lastBitMask <<= 0x96 - aExp; roundBitsMask = lastBitMask - 1; z = a; if ( roundingMode == float_round_nearest_even ) { z += lastBitMask>>1; if ( ( z & roundBitsMask ) == 0 ) z &= ~ lastBitMask; } else if ( roundingMode != float_round_to_zero ) { if ( extractFloat32Sign( z ) ^ ( roundingMode == float_round_up ) ) { z += roundBitsMask; } } z &= ~ roundBitsMask; if ( z != a ) roundData->exception |= float_flag_inexact; return z; } /* ------------------------------------------------------------------------------- Returns the result of adding the absolute values of the single-precision floating-point values `a' and `b'. If `zSign' is true, the sum is negated before being returned. `zSign' is ignored if the result is a NaN. 
The addition is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ static float32 addFloat32Sigs( struct roundingData *roundData, float32 a, float32 b, flag zSign ) { int16 aExp, bExp, zExp; bits32 aSig, bSig, zSig; int16 expDiff; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); bSig = extractFloat32Frac( b ); bExp = extractFloat32Exp( b ); expDiff = aExp - bExp; aSig <<= 6; bSig <<= 6; if ( 0 < expDiff ) { if ( aExp == 0xFF ) { if ( aSig ) return propagateFloat32NaN( a, b ); return a; } if ( bExp == 0 ) { --expDiff; } else { bSig |= 0x20000000; } shift32RightJamming( bSig, expDiff, &bSig ); zExp = aExp; } else if ( expDiff < 0 ) { if ( bExp == 0xFF ) { if ( bSig ) return propagateFloat32NaN( a, b ); return packFloat32( zSign, 0xFF, 0 ); } if ( aExp == 0 ) { ++expDiff; } else { aSig |= 0x20000000; } shift32RightJamming( aSig, - expDiff, &aSig ); zExp = bExp; } else { if ( aExp == 0xFF ) { if ( aSig | bSig ) return propagateFloat32NaN( a, b ); return a; } if ( aExp == 0 ) return packFloat32( zSign, 0, ( aSig + bSig )>>6 ); zSig = 0x40000000 + aSig + bSig; zExp = aExp; goto roundAndPack; } aSig |= 0x20000000; zSig = ( aSig + bSig )<<1; --zExp; if ( (sbits32) zSig < 0 ) { zSig = aSig + bSig; ++zExp; } roundAndPack: return roundAndPackFloat32( roundData, zSign, zExp, zSig ); } /* ------------------------------------------------------------------------------- Returns the result of subtracting the absolute values of the single- precision floating-point values `a' and `b'. If `zSign' is true, the difference is negated before being returned. `zSign' is ignored if the result is a NaN. The subtraction is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ static float32 subFloat32Sigs( struct roundingData *roundData, float32 a, float32 b, flag zSign ) { int16 aExp, bExp, zExp; bits32 aSig, bSig, zSig; int16 expDiff; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); bSig = extractFloat32Frac( b ); bExp = extractFloat32Exp( b ); expDiff = aExp - bExp; aSig <<= 7; bSig <<= 7; if ( 0 < expDiff ) goto aExpBigger; if ( expDiff < 0 ) goto bExpBigger; if ( aExp == 0xFF ) { if ( aSig | bSig ) return propagateFloat32NaN( a, b ); roundData->exception |= float_flag_invalid; return float32_default_nan; } if ( aExp == 0 ) { aExp = 1; bExp = 1; } if ( bSig < aSig ) goto aBigger; if ( aSig < bSig ) goto bBigger; return packFloat32( roundData->mode == float_round_down, 0, 0 ); bExpBigger: if ( bExp == 0xFF ) { if ( bSig ) return propagateFloat32NaN( a, b ); return packFloat32( zSign ^ 1, 0xFF, 0 ); } if ( aExp == 0 ) { ++expDiff; } else { aSig |= 0x40000000; } shift32RightJamming( aSig, - expDiff, &aSig ); bSig |= 0x40000000; bBigger: zSig = bSig - aSig; zExp = bExp; zSign ^= 1; goto normalizeRoundAndPack; aExpBigger: if ( aExp == 0xFF ) { if ( aSig ) return propagateFloat32NaN( a, b ); return a; } if ( bExp == 0 ) { --expDiff; } else { bSig |= 0x40000000; } shift32RightJamming( bSig, expDiff, &bSig ); aSig |= 0x40000000; aBigger: zSig = aSig - bSig; zExp = aExp; normalizeRoundAndPack: --zExp; return normalizeRoundAndPackFloat32( roundData, zSign, zExp, zSig ); } /* ------------------------------------------------------------------------------- Returns the result of adding the single-precision floating-point values `a' and `b'. 
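(Editor's aside, not part of the original text: in addFloat32Sigs above,
the smaller operand is aligned with shift32RightJamming, which ORs any
bits shifted out into the result's low bit so later rounding still sees
them; jamming 0x30000005 right by 4, for instance, gives
0x03000000 | 1 = 0x03000001, the lost nonzero tail surviving as a sticky
bit.)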
The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float32 float32_add( struct roundingData *roundData, float32 a, float32 b ) { flag aSign, bSign; aSign = extractFloat32Sign( a ); bSign = extractFloat32Sign( b ); if ( aSign == bSign ) { return addFloat32Sigs( roundData, a, b, aSign ); } else { return subFloat32Sigs( roundData, a, b, aSign ); } } /* ------------------------------------------------------------------------------- Returns the result of subtracting the single-precision floating-point values `a' and `b'. The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float32 float32_sub( struct roundingData *roundData, float32 a, float32 b ) { flag aSign, bSign; aSign = extractFloat32Sign( a ); bSign = extractFloat32Sign( b ); if ( aSign == bSign ) { return subFloat32Sigs( roundData, a, b, aSign ); } else { return addFloat32Sigs( roundData, a, b, aSign ); } } /* ------------------------------------------------------------------------------- Returns the result of multiplying the single-precision floating-point values `a' and `b'. The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float32 float32_mul( struct roundingData *roundData, float32 a, float32 b ) { flag aSign, bSign, zSign; int16 aExp, bExp, zExp; bits32 aSig, bSig; bits64 zSig64; bits32 zSig; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); bSig = extractFloat32Frac( b ); bExp = extractFloat32Exp( b ); bSign = extractFloat32Sign( b ); zSign = aSign ^ bSign; if ( aExp == 0xFF ) { if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { return propagateFloat32NaN( a, b ); } if ( ( bExp | bSig ) == 0 ) { roundData->exception |= float_flag_invalid; return float32_default_nan; } return packFloat32( zSign, 0xFF, 0 ); } if ( bExp == 0xFF ) { if ( bSig ) return propagateFloat32NaN( a, b ); if ( ( aExp | aSig ) == 0 ) { roundData->exception |= float_flag_invalid; return float32_default_nan; } return packFloat32( zSign, 0xFF, 0 ); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } if ( bExp == 0 ) { if ( bSig == 0 ) return packFloat32( zSign, 0, 0 ); normalizeFloat32Subnormal( bSig, &bExp, &bSig ); } zExp = aExp + bExp - 0x7F; aSig = ( aSig | 0x00800000 )<<7; bSig = ( bSig | 0x00800000 )<<8; shift64RightJamming( ( (bits64) aSig ) * bSig, 32, &zSig64 ); zSig = zSig64; if ( 0 <= (sbits32) ( zSig<<1 ) ) { zSig <<= 1; --zExp; } return roundAndPackFloat32( roundData, zSign, zExp, zSig ); } /* ------------------------------------------------------------------------------- Returns the result of dividing the single-precision floating-point value `a' by the corresponding value `b'. The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
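(Editor's note, not part of the original text: both float32_mul above and
float32_div below preserve inexactness through a sticky bit.  The
multiply jams the discarded low 32 bits of the double-width significand
product into bit 0 via shift64RightJamming; the divide, after computing
the quotient with do_div, multiplies it back and ORs in a sticky bit
whenever the low six quotient bits suggest the division might have been
exact.)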
------------------------------------------------------------------------------- */ float32 float32_div( struct roundingData *roundData, float32 a, float32 b ) { flag aSign, bSign, zSign; int16 aExp, bExp, zExp; bits32 aSig, bSig, zSig; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); bSig = extractFloat32Frac( b ); bExp = extractFloat32Exp( b ); bSign = extractFloat32Sign( b ); zSign = aSign ^ bSign; if ( aExp == 0xFF ) { if ( aSig ) return propagateFloat32NaN( a, b ); if ( bExp == 0xFF ) { if ( bSig ) return propagateFloat32NaN( a, b ); roundData->exception |= float_flag_invalid; return float32_default_nan; } return packFloat32( zSign, 0xFF, 0 ); } if ( bExp == 0xFF ) { if ( bSig ) return propagateFloat32NaN( a, b ); return packFloat32( zSign, 0, 0 ); } if ( bExp == 0 ) { if ( bSig == 0 ) { if ( ( aExp | aSig ) == 0 ) { roundData->exception |= float_flag_invalid; return float32_default_nan; } roundData->exception |= float_flag_divbyzero; return packFloat32( zSign, 0xFF, 0 ); } normalizeFloat32Subnormal( bSig, &bExp, &bSig ); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } zExp = aExp - bExp + 0x7D; aSig = ( aSig | 0x00800000 )<<7; bSig = ( bSig | 0x00800000 )<<8; if ( bSig <= ( aSig + aSig ) ) { aSig >>= 1; ++zExp; } { bits64 tmp = ( (bits64) aSig )<<32; do_div( tmp, bSig ); zSig = tmp; } if ( ( zSig & 0x3F ) == 0 ) { zSig |= ( ( (bits64) bSig ) * zSig != ( (bits64) aSig )<<32 ); } return roundAndPackFloat32( roundData, zSign, zExp, zSig ); } /* ------------------------------------------------------------------------------- Returns the remainder of the single-precision floating-point value `a' with respect to the corresponding value `b'. The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float32 float32_rem( struct roundingData *roundData, float32 a, float32 b ) { flag aSign, bSign, zSign; int16 aExp, bExp, expDiff; bits32 aSig, bSig; bits32 q; bits64 aSig64, bSig64, q64; bits32 alternateASig; sbits32 sigMean; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); bSig = extractFloat32Frac( b ); bExp = extractFloat32Exp( b ); bSign = extractFloat32Sign( b ); if ( aExp == 0xFF ) { if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { return propagateFloat32NaN( a, b ); } roundData->exception |= float_flag_invalid; return float32_default_nan; } if ( bExp == 0xFF ) { if ( bSig ) return propagateFloat32NaN( a, b ); return a; } if ( bExp == 0 ) { if ( bSig == 0 ) { roundData->exception |= float_flag_invalid; return float32_default_nan; } normalizeFloat32Subnormal( bSig, &bExp, &bSig ); } if ( aExp == 0 ) { if ( aSig == 0 ) return a; normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } expDiff = aExp - bExp; aSig |= 0x00800000; bSig |= 0x00800000; if ( expDiff < 32 ) { aSig <<= 8; bSig <<= 8; if ( expDiff < 0 ) { if ( expDiff < -1 ) return a; aSig >>= 1; } q = ( bSig <= aSig ); if ( q ) aSig -= bSig; if ( 0 < expDiff ) { bits64 tmp = ( (bits64) aSig )<<32; do_div( tmp, bSig ); q = tmp; q >>= 32 - expDiff; bSig >>= 2; aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q; } else { aSig >>= 2; bSig >>= 2; } } else { if ( bSig <= aSig ) aSig -= bSig; aSig64 = ( (bits64) aSig )<<40; bSig64 = ( (bits64) bSig )<<40; expDiff -= 64; while ( 0 < expDiff ) { q64 = estimateDiv128To64( aSig64, 0, bSig64 ); q64 = ( 2 < q64 ) ? 
q64 - 2 : 0; aSig64 = - ( ( bSig * q64 )<<38 ); expDiff -= 62; } expDiff += 64; q64 = estimateDiv128To64( aSig64, 0, bSig64 ); q64 = ( 2 < q64 ) ? q64 - 2 : 0; q = q64>>( 64 - expDiff ); bSig <<= 6; aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q; } do { alternateASig = aSig; ++q; aSig -= bSig; } while ( 0 <= (sbits32) aSig ); sigMean = aSig + alternateASig; if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) { aSig = alternateASig; } zSign = ( (sbits32) aSig < 0 ); if ( zSign ) aSig = - aSig; return normalizeRoundAndPackFloat32( roundData, aSign ^ zSign, bExp, aSig ); } /* ------------------------------------------------------------------------------- Returns the square root of the single-precision floating-point value `a'. The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float32 float32_sqrt( struct roundingData *roundData, float32 a ) { flag aSign; int16 aExp, zExp; bits32 aSig, zSig; bits64 rem, term; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { if ( aSig ) return propagateFloat32NaN( a, 0 ); if ( ! aSign ) return a; roundData->exception |= float_flag_invalid; return float32_default_nan; } if ( aSign ) { if ( ( aExp | aSig ) == 0 ) return a; roundData->exception |= float_flag_invalid; return float32_default_nan; } if ( aExp == 0 ) { if ( aSig == 0 ) return 0; normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } zExp = ( ( aExp - 0x7F )>>1 ) + 0x7E; aSig = ( aSig | 0x00800000 )<<8; zSig = estimateSqrt32( aExp, aSig ) + 2; if ( ( zSig & 0x7F ) <= 5 ) { if ( zSig < 2 ) { zSig = 0xFFFFFFFF; } else { aSig >>= aExp & 1; term = ( (bits64) zSig ) * zSig; rem = ( ( (bits64) aSig )<<32 ) - term; while ( (sbits64) rem < 0 ) { --zSig; rem += ( ( (bits64) zSig )<<1 ) | 1; } zSig |= ( rem != 0 ); } } shift32RightJamming( zSig, 1, &zSig ); return roundAndPackFloat32( roundData, 0, zExp, zSig ); } /* ------------------------------------------------------------------------------- Returns 1 if the single-precision floating-point value `a' is equal to the corresponding value `b', and 0 otherwise. The comparison is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ flag float32_eq( float32 a, float32 b ) { if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { float_raise( float_flag_invalid ); } return 0; } return ( a == b ) || ( (bits32) ( ( a | b )<<1 ) == 0 ); } /* ------------------------------------------------------------------------------- Returns 1 if the single-precision floating-point value `a' is less than or equal to the corresponding value `b', and 0 otherwise. The comparison is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
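(Editor's note, not part of the original text: once NaNs have been
screened out, these comparisons reduce to sign-magnitude integer
comparisons on the raw encodings.  The one wrinkle is that +0 and -0
compare equal despite different bit patterns; the test
( (bits32) ( ( a | b )<<1 ) == 0 ) catches exactly that pair, since
shifting the sign bit out leaves zero for both operands only when both
are zeros.)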
-------------------------------------------------------------------------------
*/
flag float32_le( float32 a, float32 b )
{
    flag aSign, bSign;

    if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
         || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    aSign = extractFloat32Sign( a );
    bSign = extractFloat32Sign( b );
    if ( aSign != bSign ) return aSign || ( (bits32) ( ( a | b )<<1 ) == 0 );
    return ( a == b ) || ( aSign ^ ( a < b ) );
}

/*
-------------------------------------------------------------------------------
Returns 1 if the single-precision floating-point value `a' is less than
the corresponding value `b', and 0 otherwise.  The comparison is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float32_lt( float32 a, float32 b )
{
    flag aSign, bSign;

    if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
         || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    aSign = extractFloat32Sign( a );
    bSign = extractFloat32Sign( b );
    if ( aSign != bSign ) return aSign && ( (bits32) ( ( a | b )<<1 ) != 0 );
    return ( a != b ) && ( aSign ^ ( a < b ) );
}

/*
-------------------------------------------------------------------------------
Returns 1 if the single-precision floating-point value `a' is equal to the
corresponding value `b', and 0 otherwise.  The invalid exception is raised
if either operand is a NaN.  Otherwise, the comparison is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float32_eq_signaling( float32 a, float32 b )
{
    if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
         || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    return ( a == b ) || ( (bits32) ( ( a | b )<<1 ) == 0 );
}

/*
-------------------------------------------------------------------------------
Returns 1 if the single-precision floating-point value `a' is less than or
equal to the corresponding value `b', and 0 otherwise.  Quiet NaNs do not
cause an exception.  Otherwise, the comparison is performed according to the
IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float32_le_quiet( float32 a, float32 b )
{
    flag aSign, bSign;
    //int16 aExp, bExp;

    if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
         || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
       ) {
        /* Do nothing, even if NaN as we're quiet */
        return 0;
    }
    aSign = extractFloat32Sign( a );
    bSign = extractFloat32Sign( b );
    if ( aSign != bSign ) return aSign || ( (bits32) ( ( a | b )<<1 ) == 0 );
    return ( a == b ) || ( aSign ^ ( a < b ) );
}

/*
-------------------------------------------------------------------------------
Returns 1 if the single-precision floating-point value `a' is less than
the corresponding value `b', and 0 otherwise.  Quiet NaNs do not cause an
exception.  Otherwise, the comparison is performed according to the IEC/IEEE
Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float32_lt_quiet( float32 a, float32 b )
{
    flag aSign, bSign;

    if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
         || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
       ) {
        /* Do nothing, even if NaN as we're quiet */
        return 0;
    }
    aSign = extractFloat32Sign( a );
    bSign = extractFloat32Sign( b );
    if ( aSign != bSign ) return aSign && ( (bits32) ( ( a | b )<<1 ) != 0 );
    return ( a != b ) && ( aSign ^ ( a < b ) );
}

/*
-------------------------------------------------------------------------------
Returns the result of converting the double-precision floating-point value
`a' to the 32-bit two's complement integer format.  The conversion is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic---which means in particular that the conversion is rounded
according to the current rounding mode.  If `a' is a NaN, the largest
positive integer is returned.  Otherwise, if the conversion overflows, the
largest integer with the same sign as `a' is returned.
-------------------------------------------------------------------------------
*/
int32 float64_to_int32( struct roundingData *roundData, float64 a )
{
    flag aSign;
    int16 aExp, shiftCount;
    bits64 aSig;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
    if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
    shiftCount = 0x42C - aExp;
    if ( 0 < shiftCount ) shift64RightJamming( aSig, shiftCount, &aSig );
    return roundAndPackInt32( roundData, aSign, aSig );
}

/*
-------------------------------------------------------------------------------
Returns the result of converting the double-precision floating-point value
`a' to the 32-bit two's complement integer format.  The conversion is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic, except that the conversion is always rounded toward zero.  If
`a' is a NaN, the largest positive integer is returned.  Otherwise, if the
conversion overflows, the largest integer with the same sign as `a' is
returned.
-------------------------------------------------------------------------------
*/
int32 float64_to_int32_round_to_zero( float64 a )
{
    flag aSign;
    int16 aExp, shiftCount;
    bits64 aSig, savedASig;
    int32 z;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    shiftCount = 0x433 - aExp;
    if ( shiftCount < 21 ) {
        if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
        goto invalid;
    }
    else if ( 52 < shiftCount ) {
        if ( aExp || aSig ) float_raise( float_flag_inexact );
        return 0;
    }
    aSig |= LIT64( 0x0010000000000000 );
    savedASig = aSig;
    aSig >>= shiftCount;
    z = aSig;
    if ( aSign ) z = - z;
    if ( ( z < 0 ) ^ aSign ) {
 invalid:
        float_raise( float_flag_invalid );
        return aSign ? 0x80000000 : 0x7FFFFFFF;
    }
    if ( ( aSig<<shiftCount ) != savedASig ) {
        float_raise( float_flag_inexact );
    }
    return z;
}

/*
-------------------------------------------------------------------------------
Returns the result of converting the double-precision floating-point value
`a' to the 32-bit unsigned integer format.  The conversion is performed
according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic---which means in particular that the conversion is rounded
according to the current rounding mode.  If `a' is a NaN, the largest
positive integer is returned.  Otherwise, if the conversion overflows, the
largest positive integer is returned.  (The body below reuses the signed
conversion path with the sign simply forced to zero.)
-------------------------------------------------------------------------------
*/
int32 float64_to_uint32( struct roundingData *roundData, float64 a )
{
    flag aSign;
    int16 aExp, shiftCount;
    bits64 aSig;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = 0;	//extractFloat64Sign( a );
    //if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
    if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
    shiftCount = 0x42C - aExp;
    if ( 0 < shiftCount ) shift64RightJamming( aSig, shiftCount, &aSig );
    return roundAndPackInt32( roundData, aSign, aSig );
}

/*
-------------------------------------------------------------------------------
Returns the result of converting the double-precision floating-point value
`a' to the 32-bit unsigned integer format.  The conversion is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic,
except that the conversion is always rounded toward zero.  If `a' is a NaN,
the largest positive integer is returned.  Otherwise, if the conversion
overflows, the largest positive integer is returned.  (As with
float64_to_uint32 above, the body below follows the signed conversion's
saturation logic.)
-------------------------------------------------------------------------------
*/
int32 float64_to_uint32_round_to_zero( float64 a )
{
    flag aSign;
    int16 aExp, shiftCount;
    bits64 aSig, savedASig;
    int32 z;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    shiftCount = 0x433 - aExp;
    if ( shiftCount < 21 ) {
        if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
        goto invalid;
    }
    else if ( 52 < shiftCount ) {
        if ( aExp || aSig ) float_raise( float_flag_inexact );
        return 0;
    }
    aSig |= LIT64( 0x0010000000000000 );
    savedASig = aSig;
    aSig >>= shiftCount;
    z = aSig;
    if ( aSign ) z = - z;
    if ( ( z < 0 ) ^ aSign ) {
 invalid:
        float_raise( float_flag_invalid );
        return aSign ? 0x80000000 : 0x7FFFFFFF;
    }
    if ( ( aSig<<shiftCount ) != savedASig ) {
        float_raise( float_flag_inexact );
    }
    return z;
}

/*
-------------------------------------------------------------------------------
Returns the result of converting the double-precision floating-point value
`a' to the single-precision floating-point format.  The conversion is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic.
-------------------------------------------------------------------------------
*/
float32 float64_to_float32( struct roundingData *roundData, float64 a )
{
    flag aSign;
    int16 aExp;
    bits64 aSig;
    bits32 zSig;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    if ( aExp == 0x7FF ) {
        if ( aSig ) return commonNaNToFloat32( float64ToCommonNaN( a ) );
        return packFloat32( aSign, 0xFF, 0 );
    }
    shift64RightJamming( aSig, 22, &aSig );
    zSig = aSig;
    if ( aExp || zSig ) {
        zSig |= 0x40000000;
        aExp -= 0x381;
    }
    return roundAndPackFloat32( roundData, aSign, aExp, zSig );
}

#ifdef FLOATX80

/*
-------------------------------------------------------------------------------
Returns the result of converting the double-precision floating-point value
`a' to the extended double-precision floating-point format.  The conversion
is performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic.
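(Editor's note, not part of the original text: this widening is exact.
The 52-bit fraction gains its explicit integer bit and is shifted left by
11 so that the integer bit lands in bit 63 of the extended significand,
and the bias moves from 1023 to 16383, hence the aExp + 0x3C00 below.)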
------------------------------------------------------------------------------- */ floatx80 float64_to_floatx80( float64 a ) { flag aSign; int16 aExp; bits64 aSig; aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp == 0x7FF ) { if ( aSig ) return commonNaNToFloatx80( float64ToCommonNaN( a ) ); return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); normalizeFloat64Subnormal( aSig, &aExp, &aSig ); } return packFloatx80( aSign, aExp + 0x3C00, ( aSig | LIT64( 0x0010000000000000 ) )<<11 ); } #endif /* ------------------------------------------------------------------------------- Rounds the double-precision floating-point value `a' to an integer, and returns the result as a double-precision floating-point value. The operation is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. ------------------------------------------------------------------------------- */ float64 float64_round_to_int( struct roundingData *roundData, float64 a ) { flag aSign; int16 aExp; bits64 lastBitMask, roundBitsMask; int8 roundingMode; float64 z; aExp = extractFloat64Exp( a ); if ( 0x433 <= aExp ) { if ( ( aExp == 0x7FF ) && extractFloat64Frac( a ) ) { return propagateFloat64NaN( a, a ); } return a; } if ( aExp <= 0x3FE ) { if ( (bits64) ( a<<1 ) == 0 ) return a; roundData->exception |= float_flag_inexact; aSign = extractFloat64Sign( a ); switch ( roundData->mode ) { case float_round_nearest_even: if ( ( aExp == 0x3FE ) && extractFloat64Frac( a ) ) { return packFloat64( aSign, 0x3FF, 0 ); } break; case float_round_down: return aSign ? LIT64( 0xBFF0000000000000 ) : 0; case float_round_up: return aSign ? LIT64( 0x8000000000000000 ) : LIT64( 0x3FF0000000000000 ); } return packFloat64( aSign, 0, 0 ); } lastBitMask = 1; lastBitMask <<= 0x433 - aExp; roundBitsMask = lastBitMask - 1; z = a; roundingMode = roundData->mode; if ( roundingMode == float_round_nearest_even ) { z += lastBitMask>>1; if ( ( z & roundBitsMask ) == 0 ) z &= ~ lastBitMask; } else if ( roundingMode != float_round_to_zero ) { if ( extractFloat64Sign( z ) ^ ( roundingMode == float_round_up ) ) { z += roundBitsMask; } } z &= ~ roundBitsMask; if ( z != a ) roundData->exception |= float_flag_inexact; return z; } /* ------------------------------------------------------------------------------- Returns the result of adding the absolute values of the double-precision floating-point values `a' and `b'. If `zSign' is true, the sum is negated before being returned. `zSign' is ignored if the result is a NaN. The addition is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
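(Editor's sketch, not part of the original text: on the unequal-exponent
path the sum is speculatively normalized with
`zSig = ( aSig + bSig )<<1; --zExp;' and the shift is undone when it
overflows into the sign bit, i.e. when the aligned sum had already
reached bit 62.  Equivalent branch, hypothetical names:

    #include <stdint.h>
    static uint64_t add_aligned_sigs( uint64_t aSig, uint64_t bSig, int *zExp )
    {
        uint64_t zSig = ( aSig + bSig )<<1;  // speculate: no carry into bit 62
        --*zExp;
        if ( (int64_t) zSig < 0 ) {          // it did carry; undo the shift
            zSig = aSig + bSig;
            ++*zExp;
        }
        return zSig;
    }
)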
------------------------------------------------------------------------------- */ static float64 addFloat64Sigs( struct roundingData *roundData, float64 a, float64 b, flag zSign ) { int16 aExp, bExp, zExp; bits64 aSig, bSig, zSig; int16 expDiff; aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); bSig = extractFloat64Frac( b ); bExp = extractFloat64Exp( b ); expDiff = aExp - bExp; aSig <<= 9; bSig <<= 9; if ( 0 < expDiff ) { if ( aExp == 0x7FF ) { if ( aSig ) return propagateFloat64NaN( a, b ); return a; } if ( bExp == 0 ) { --expDiff; } else { bSig |= LIT64( 0x2000000000000000 ); } shift64RightJamming( bSig, expDiff, &bSig ); zExp = aExp; } else if ( expDiff < 0 ) { if ( bExp == 0x7FF ) { if ( bSig ) return propagateFloat64NaN( a, b ); return packFloat64( zSign, 0x7FF, 0 ); } if ( aExp == 0 ) { ++expDiff; } else { aSig |= LIT64( 0x2000000000000000 ); } shift64RightJamming( aSig, - expDiff, &aSig ); zExp = bExp; } else { if ( aExp == 0x7FF ) { if ( aSig | bSig ) return propagateFloat64NaN( a, b ); return a; } if ( aExp == 0 ) return packFloat64( zSign, 0, ( aSig + bSig )>>9 ); zSig = LIT64( 0x4000000000000000 ) + aSig + bSig; zExp = aExp; goto roundAndPack; } aSig |= LIT64( 0x2000000000000000 ); zSig = ( aSig + bSig )<<1; --zExp; if ( (sbits64) zSig < 0 ) { zSig = aSig + bSig; ++zExp; } roundAndPack: return roundAndPackFloat64( roundData, zSign, zExp, zSig ); } /* ------------------------------------------------------------------------------- Returns the result of subtracting the absolute values of the double- precision floating-point values `a' and `b'. If `zSign' is true, the difference is negated before being returned. `zSign' is ignored if the result is a NaN. The subtraction is performed according to the IEC/IEEE Standard for Binary Floating-point Arithmetic. 
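(Editor's note, not part of the original text: two details of the
subtraction below are worth calling out.  When the operands are exactly
equal, the result is a zero whose sign depends on the rounding mode;
packFloat64( roundData->mode == float_round_down, 0, 0 ) yields -0 only
under round-down, as the standard requires.  And denormal operands are
handled by treating exponent 0 as exponent 1, which keeps both
significands on the same fixed-point scale without a separate code path.)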
/*
-------------------------------------------------------------------------------
Returns the result of subtracting the absolute values of the double-
precision floating-point values `a' and `b'.  If `zSign' is true, the
difference is negated before being returned.  `zSign' is ignored if the
result is a NaN.  The subtraction is performed according to the IEC/IEEE
Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
static float64 subFloat64Sigs( struct roundingData *roundData, float64 a, float64 b, flag zSign )
{
    int16 aExp, bExp, zExp;
    bits64 aSig, bSig, zSig;
    int16 expDiff;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    expDiff = aExp - bExp;
    aSig <<= 10;
    bSig <<= 10;
    if ( 0 < expDiff ) goto aExpBigger;
    if ( expDiff < 0 ) goto bExpBigger;
    if ( aExp == 0x7FF ) {
        if ( aSig | bSig ) return propagateFloat64NaN( a, b );
        roundData->exception |= float_flag_invalid;
        return float64_default_nan;
    }
    if ( aExp == 0 ) {
        aExp = 1;
        bExp = 1;
    }
    if ( bSig < aSig ) goto aBigger;
    if ( aSig < bSig ) goto bBigger;
    return packFloat64( roundData->mode == float_round_down, 0, 0 );
 bExpBigger:
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b );
        return packFloat64( zSign ^ 1, 0x7FF, 0 );
    }
    if ( aExp == 0 ) {
        ++expDiff;
    }
    else {
        aSig |= LIT64( 0x4000000000000000 );
    }
    shift64RightJamming( aSig, - expDiff, &aSig );
    bSig |= LIT64( 0x4000000000000000 );
 bBigger:
    zSig = bSig - aSig;
    zExp = bExp;
    zSign ^= 1;
    goto normalizeRoundAndPack;
 aExpBigger:
    if ( aExp == 0x7FF ) {
        if ( aSig ) return propagateFloat64NaN( a, b );
        return a;
    }
    if ( bExp == 0 ) {
        --expDiff;
    }
    else {
        bSig |= LIT64( 0x4000000000000000 );
    }
    shift64RightJamming( bSig, expDiff, &bSig );
    aSig |= LIT64( 0x4000000000000000 );
 aBigger:
    zSig = aSig - bSig;
    zExp = aExp;
 normalizeRoundAndPack:
    --zExp;
    return normalizeRoundAndPackFloat64( roundData, zSign, zExp, zSig );

}

/*
-------------------------------------------------------------------------------
Returns the result of adding the double-precision floating-point values `a'
and `b'.  The operation is performed according to the IEC/IEEE Standard for
Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float64 float64_add( struct roundingData *roundData, float64 a, float64 b )
{
    flag aSign, bSign;

    aSign = extractFloat64Sign( a );
    bSign = extractFloat64Sign( b );
    if ( aSign == bSign ) {
        return addFloat64Sigs( roundData, a, b, aSign );
    }
    else {
        return subFloat64Sigs( roundData, a, b, aSign );
    }

}

/*
-------------------------------------------------------------------------------
Returns the result of subtracting the double-precision floating-point values
`a' and `b'.  The operation is performed according to the IEC/IEEE Standard
for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float64 float64_sub( struct roundingData *roundData, float64 a, float64 b )
{
    flag aSign, bSign;

    aSign = extractFloat64Sign( a );
    bSign = extractFloat64Sign( b );
    if ( aSign == bSign ) {
        return subFloat64Sigs( roundData, a, b, aSign );
    }
    else {
        return addFloat64Sigs( roundData, a, b, aSign );
    }

}
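/*
-------------------------------------------------------------------------------
Illustrative note (added): float64_add/float64_sub take and return raw bit
patterns, and fold subtraction of like-signed operands into subFloat64Sigs
by inspecting only the sign bits.  A minimal hypothetical harness call:
-------------------------------------------------------------------------------
*/
#if 0
static void demo_float64_add_sub( void )
{
    struct roundingData roundData = { 0 };
    float64 one = LIT64( 0x3FF0000000000000 );  /* 1.0 */
    float64 two = LIT64( 0x4000000000000000 );  /* 2.0 */
    float64 sum;

    roundData.mode = float_round_nearest_even;
    sum = float64_add( &roundData, one, two );
    /* sum == LIT64( 0x4008000000000000 ), i.e. 3.0; the result is exact,
       so no exception bits accumulate in roundData.exception */
}
#endif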
/*
-------------------------------------------------------------------------------
Returns the result of multiplying the double-precision floating-point values
`a' and `b'.  The operation is performed according to the IEC/IEEE Standard
for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float64 float64_mul( struct roundingData *roundData, float64 a, float64 b )
{
    flag aSign, bSign, zSign;
    int16 aExp, bExp, zExp;
    bits64 aSig, bSig, zSig0, zSig1;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    bSign = extractFloat64Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FF ) {
        if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
            return propagateFloat64NaN( a, b );
        }
        if ( ( bExp | bSig ) == 0 ) {
            roundData->exception |= float_flag_invalid;
            return float64_default_nan;
        }
        return packFloat64( zSign, 0x7FF, 0 );
    }
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b );
        if ( ( aExp | aSig ) == 0 ) {
            roundData->exception |= float_flag_invalid;
            return float64_default_nan;
        }
        return packFloat64( zSign, 0x7FF, 0 );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloat64( zSign, 0, 0 );
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) return packFloat64( zSign, 0, 0 );
        normalizeFloat64Subnormal( bSig, &bExp, &bSig );
    }
    zExp = aExp + bExp - 0x3FF;
    aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10;
    bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
    mul64To128( aSig, bSig, &zSig0, &zSig1 );
    zSig0 |= ( zSig1 != 0 );
    if ( 0 <= (sbits64) ( zSig0<<1 ) ) {
        zSig0 <<= 1;
        --zExp;
    }
    return roundAndPackFloat64( roundData, zSign, zExp, zSig0 );

}
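/*
-------------------------------------------------------------------------------
Illustrative note (added): the <<10 and <<11 shifts above position the
128-bit product from mul64To128() so that its high half already holds the
result significand, with the low half folded in as a sticky bit.  A
hypothetical harness call:
-------------------------------------------------------------------------------
*/
#if 0
static void demo_float64_mul( void )
{
    struct roundingData roundData = { 0 };
    float64 r;

    roundData.mode = float_round_nearest_even;
    /* 1.5 (0x3FF8000000000000) * 2.0 (0x4000000000000000) == 3.0 */
    r = float64_mul( &roundData, LIT64( 0x3FF8000000000000 ),
                     LIT64( 0x4000000000000000 ) );
    /* r == LIT64( 0x4008000000000000 ) */
}
#endif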
/*
-------------------------------------------------------------------------------
Returns the result of dividing the double-precision floating-point value `a'
by the corresponding value `b'.  The operation is performed according to the
IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float64 float64_div( struct roundingData *roundData, float64 a, float64 b )
{
    flag aSign, bSign, zSign;
    int16 aExp, bExp, zExp;
    bits64 aSig, bSig, zSig;
    bits64 rem0, rem1;
    bits64 term0, term1;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    bSign = extractFloat64Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FF ) {
        if ( aSig ) return propagateFloat64NaN( a, b );
        if ( bExp == 0x7FF ) {
            if ( bSig ) return propagateFloat64NaN( a, b );
            roundData->exception |= float_flag_invalid;
            return float64_default_nan;
        }
        return packFloat64( zSign, 0x7FF, 0 );
    }
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b );
        return packFloat64( zSign, 0, 0 );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
            if ( ( aExp | aSig ) == 0 ) {
                roundData->exception |= float_flag_invalid;
                return float64_default_nan;
            }
            roundData->exception |= float_flag_divbyzero;
            return packFloat64( zSign, 0x7FF, 0 );
        }
        normalizeFloat64Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloat64( zSign, 0, 0 );
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    zExp = aExp - bExp + 0x3FD;
    aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10;
    bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
    if ( bSig <= ( aSig + aSig ) ) {
        aSig >>= 1;
        ++zExp;
    }
    zSig = estimateDiv128To64( aSig, 0, bSig );
    if ( ( zSig & 0x1FF ) <= 2 ) {
        mul64To128( bSig, zSig, &term0, &term1 );
        sub128( aSig, 0, term0, term1, &rem0, &rem1 );
        while ( (sbits64) rem0 < 0 ) {
            --zSig;
            add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
        }
        zSig |= ( rem1 != 0 );
    }
    return roundAndPackFloat64( roundData, zSign, zExp, zSig );

}
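/*
-------------------------------------------------------------------------------
Illustrative note (added): estimateDiv128To64() may overestimate the true
quotient slightly, so the correction loop above only runs when the low bits
of zSig sit near a rounding boundary, decrementing zSig until the remainder
is non-negative.  A hypothetical harness call:
-------------------------------------------------------------------------------
*/
#if 0
static void demo_float64_div( void )
{
    struct roundingData roundData = { 0 };
    float64 r;

    roundData.mode = float_round_nearest_even;
    /* 10.0 (0x4024000000000000) / 4.0 (0x4010000000000000) == 2.5 */
    r = float64_div( &roundData, LIT64( 0x4024000000000000 ),
                     LIT64( 0x4010000000000000 ) );
    /* r == LIT64( 0x4004000000000000 ); dividing a finite nonzero value by
       zero would instead set float_flag_divbyzero and return an infinity */
}
#endif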
/*
-------------------------------------------------------------------------------
Returns the remainder of the double-precision floating-point value `a'
with respect to the corresponding value `b'.  The operation is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float64 float64_rem( struct roundingData *roundData, float64 a, float64 b )
{
    flag aSign, bSign, zSign;
    int16 aExp, bExp, expDiff;
    bits64 aSig, bSig;
    bits64 q, alternateASig;
    sbits64 sigMean;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    bSign = extractFloat64Sign( b );
    if ( aExp == 0x7FF ) {
        if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
            return propagateFloat64NaN( a, b );
        }
        roundData->exception |= float_flag_invalid;
        return float64_default_nan;
    }
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b );
        return a;
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
            roundData->exception |= float_flag_invalid;
            return float64_default_nan;
        }
        normalizeFloat64Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return a;
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    expDiff = aExp - bExp;
    aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<11;
    bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
    if ( expDiff < 0 ) {
        if ( expDiff < -1 ) return a;
        aSig >>= 1;
    }
    q = ( bSig <= aSig );
    if ( q ) aSig -= bSig;
    expDiff -= 64;
    while ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig, 0, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        aSig = - ( ( bSig>>2 ) * q );
        expDiff -= 62;
    }
    expDiff += 64;
    if ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig, 0, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        q >>= 64 - expDiff;
        bSig >>= 2;
        aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
    }
    else {
        aSig >>= 2;
        bSig >>= 2;
    }
    do {
        alternateASig = aSig;
        ++q;
        aSig -= bSig;
    } while ( 0 <= (sbits64) aSig );
    sigMean = aSig + alternateASig;
    if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
        aSig = alternateASig;
    }
    zSign = ( (sbits64) aSig < 0 );
    if ( zSign ) aSig = - aSig;
    return normalizeRoundAndPackFloat64( roundData, aSign ^ zSign, bExp, aSig );

}

/*
-------------------------------------------------------------------------------
Returns the square root of the double-precision floating-point value `a'.
The operation is performed according to the IEC/IEEE Standard for Binary
Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float64 float64_sqrt( struct roundingData *roundData, float64 a )
{
    flag aSign;
    int16 aExp, zExp;
    bits64 aSig, zSig;
    bits64 rem0, rem1, term0, term1; //, shiftedRem;
    //float64 z;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    if ( aExp == 0x7FF ) {
        if ( aSig ) return propagateFloat64NaN( a, a );
        if ( ! aSign ) return a;
        roundData->exception |= float_flag_invalid;
        return float64_default_nan;
    }
    if ( aSign ) {
        if ( ( aExp | aSig ) == 0 ) return a;
        roundData->exception |= float_flag_invalid;
        return float64_default_nan;
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return 0;
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    zExp = ( ( aExp - 0x3FF )>>1 ) + 0x3FE;
    aSig |= LIT64( 0x0010000000000000 );
    zSig = estimateSqrt32( aExp, aSig>>21 );
    zSig <<= 31;
    aSig <<= 9 - ( aExp & 1 );
    zSig = estimateDiv128To64( aSig, 0, zSig ) + zSig + 2;
    if ( ( zSig & 0x3FF ) <= 5 ) {
        if ( zSig < 2 ) {
            zSig = LIT64( 0xFFFFFFFFFFFFFFFF );
        }
        else {
            aSig <<= 2;
            mul64To128( zSig, zSig, &term0, &term1 );
            sub128( aSig, 0, term0, term1, &rem0, &rem1 );
            while ( (sbits64) rem0 < 0 ) {
                --zSig;
                shortShift128Left( 0, zSig, 1, &term0, &term1 );
                term1 |= 1;
                add128( rem0, rem1, term0, term1, &rem0, &rem1 );
            }
            zSig |= ( ( rem0 | rem1 ) != 0 );
        }
    }
    shift64RightJamming( zSig, 1, &zSig );
    return roundAndPackFloat64( roundData, 0, zExp, zSig );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the double-precision floating-point value `a' is equal to
the corresponding value `b', and 0 otherwise.  The comparison is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float64_eq( float64 a, float64 b )
{
    if (    ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
         || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
       ) {
        if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) {
            float_raise( float_flag_invalid );
        }
        return 0;
    }
    return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 );

}
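/*
-------------------------------------------------------------------------------
Illustrative note (added): the ( ( a | b )<<1 ) == 0 test above is the usual
trick for treating +0.0 and -0.0 as equal without branching on the sign bit.
A hypothetical harness call:
-------------------------------------------------------------------------------
*/
#if 0
static void demo_float64_eq( void )
{
    flag r;

    /* +0.0 is all-zero bits; -0.0 has only the sign bit set */
    r = float64_eq( 0, LIT64( 0x8000000000000000 ) );
    /* r == 1; comparing any NaN yields 0, and only a signaling NaN raises
       float_flag_invalid in this predicate */
}
#endif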
/*
-------------------------------------------------------------------------------
Returns 1 if the double-precision floating-point value `a' is less than
or equal to the corresponding value `b', and 0 otherwise.  The comparison
is performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic.
-------------------------------------------------------------------------------
*/
flag float64_le( float64 a, float64 b )
{
    flag aSign, bSign;

    if (    ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
         || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    aSign = extractFloat64Sign( a );
    bSign = extractFloat64Sign( b );
    if ( aSign != bSign ) return aSign || ( (bits64) ( ( a | b )<<1 ) == 0 );
    return ( a == b ) || ( aSign ^ ( a < b ) );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the double-precision floating-point value `a' is less than
the corresponding value `b', and 0 otherwise.  The comparison is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float64_lt( float64 a, float64 b )
{
    flag aSign, bSign;

    if (    ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
         || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    aSign = extractFloat64Sign( a );
    bSign = extractFloat64Sign( b );
    if ( aSign != bSign ) return aSign && ( (bits64) ( ( a | b )<<1 ) != 0 );
    return ( a != b ) && ( aSign ^ ( a < b ) );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the double-precision floating-point value `a' is equal to
the corresponding value `b', and 0 otherwise.  The invalid exception is
raised if either operand is a NaN.  Otherwise, the comparison is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float64_eq_signaling( float64 a, float64 b )
{
    if (    ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
         || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the double-precision floating-point value `a' is less than or
equal to the corresponding value `b', and 0 otherwise.  Quiet NaNs do not
cause an exception.  Otherwise, the comparison is performed according to the
IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float64_le_quiet( float64 a, float64 b )
{
    flag aSign, bSign;
    //int16 aExp, bExp;

    if (    ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
         || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
       ) {
        /* Do nothing, even if NaN as we're quiet */
        return 0;
    }
    aSign = extractFloat64Sign( a );
    bSign = extractFloat64Sign( b );
    if ( aSign != bSign ) return aSign || ( (bits64) ( ( a | b )<<1 ) == 0 );
    return ( a == b ) || ( aSign ^ ( a < b ) );

}
/*
-------------------------------------------------------------------------------
Returns 1 if the double-precision floating-point value `a' is less than
the corresponding value `b', and 0 otherwise.  Quiet NaNs do not cause an
exception.  Otherwise, the comparison is performed according to the IEC/IEEE
Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag float64_lt_quiet( float64 a, float64 b )
{
    flag aSign, bSign;

    if (    ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
         || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
       ) {
        /* Do nothing, even if NaN as we're quiet */
        return 0;
    }
    aSign = extractFloat64Sign( a );
    bSign = extractFloat64Sign( b );
    if ( aSign != bSign ) return aSign && ( (bits64) ( ( a | b )<<1 ) != 0 );
    return ( a != b ) && ( aSign ^ ( a < b ) );

}

#ifdef FLOATX80

/*
-------------------------------------------------------------------------------
Returns the result of converting the extended double-precision floating-
point value `a' to the 32-bit two's complement integer format.  The
conversion is performed according to the IEC/IEEE Standard for Binary
Floating-point Arithmetic---which means in particular that the conversion
is rounded according to the current rounding mode.  If `a' is a NaN, the
largest positive integer is returned.  Otherwise, if the conversion
overflows, the largest integer with the same sign as `a' is returned.
-------------------------------------------------------------------------------
*/
int32 floatx80_to_int32( struct roundingData *roundData, floatx80 a )
{
    flag aSign;
    int32 aExp, shiftCount;
    bits64 aSig;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    if ( ( aExp == 0x7FFF ) && (bits64) ( aSig<<1 ) ) aSign = 0;
    shiftCount = 0x4037 - aExp;
    if ( shiftCount <= 0 ) shiftCount = 1;
    shift64RightJamming( aSig, shiftCount, &aSig );
    return roundAndPackInt32( roundData, aSign, aSig );

}

/*
-------------------------------------------------------------------------------
Returns the result of converting the extended double-precision floating-
point value `a' to the 32-bit two's complement integer format.  The
conversion is performed according to the IEC/IEEE Standard for Binary
Floating-point Arithmetic, except that the conversion is always rounded
toward zero.  If `a' is a NaN, the largest positive integer is returned.
Otherwise, if the conversion overflows, the largest integer with the same
sign as `a' is returned.
-------------------------------------------------------------------------------
*/
int32 floatx80_to_int32_round_to_zero( floatx80 a )
{
    flag aSign;
    int32 aExp, shiftCount;
    bits64 aSig, savedASig;
    int32 z;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    shiftCount = 0x403E - aExp;
    if ( shiftCount < 32 ) {
        if ( ( aExp == 0x7FFF ) && (bits64) ( aSig<<1 ) ) aSign = 0;
        goto invalid;
    }
    else if ( 63 < shiftCount ) {
        if ( aExp || aSig ) float_raise( float_flag_inexact );
        return 0;
    }
    savedASig = aSig;
    aSig >>= shiftCount;
    z = aSig;
    if ( aSign ) z = - z;
    if ( ( z < 0 ) ^ aSign ) {
 invalid:
        float_raise( float_flag_invalid );
        return aSign ? 0x80000000 : 0x7FFFFFFF;
    }
    if ( ( aSig<<shiftCount ) != savedASig ) {
        float_raise( float_flag_inexact );
    }
    return z;

}
/*
-------------------------------------------------------------------------------
Returns the result of converting the extended double-precision floating-
point value `a' to the single-precision floating-point format.  The
conversion is performed according to the IEC/IEEE Standard for Binary
Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float32 floatx80_to_float32( struct roundingData *roundData, floatx80 a )
{
    flag aSign;
    int32 aExp;
    bits64 aSig;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( (bits64) ( aSig<<1 ) ) {
            return commonNaNToFloat32( floatx80ToCommonNaN( a ) );
        }
        return packFloat32( aSign, 0xFF, 0 );
    }
    shift64RightJamming( aSig, 33, &aSig );
    if ( aExp || aSig ) aExp -= 0x3F81;
    return roundAndPackFloat32( roundData, aSign, aExp, aSig );

}

/*
-------------------------------------------------------------------------------
Returns the result of converting the extended double-precision floating-
point value `a' to the double-precision floating-point format.  The
conversion is performed according to the IEC/IEEE Standard for Binary
Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
float64 floatx80_to_float64( struct roundingData *roundData, floatx80 a )
{
    flag aSign;
    int32 aExp;
    bits64 aSig, zSig;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( (bits64) ( aSig<<1 ) ) {
            return commonNaNToFloat64( floatx80ToCommonNaN( a ) );
        }
        return packFloat64( aSign, 0x7FF, 0 );
    }
    shift64RightJamming( aSig, 1, &zSig );
    if ( aExp || aSig ) aExp -= 0x3C01;
    return roundAndPackFloat64( roundData, aSign, aExp, zSig );

}

/*
-------------------------------------------------------------------------------
Rounds the extended double-precision floating-point value `a' to an integer,
and returns the result as an extended double-precision floating-point
value.  The operation is performed according to the IEC/IEEE Standard for
Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
floatx80 floatx80_round_to_int( struct roundingData *roundData, floatx80 a )
{
    flag aSign;
    int32 aExp;
    bits64 lastBitMask, roundBitsMask;
    int8 roundingMode;
    floatx80 z;

    aExp = extractFloatx80Exp( a );
    if ( 0x403E <= aExp ) {
        if ( ( aExp == 0x7FFF ) && (bits64) ( extractFloatx80Frac( a )<<1 ) ) {
            return propagateFloatx80NaN( a, a );
        }
        return a;
    }
    if ( aExp <= 0x3FFE ) {
        if (    ( aExp == 0 )
             && ( (bits64) ( extractFloatx80Frac( a )<<1 ) == 0 ) ) {
            return a;
        }
        roundData->exception |= float_flag_inexact;
        aSign = extractFloatx80Sign( a );
        switch ( roundData->mode ) {
         case float_round_nearest_even:
            if ( ( aExp == 0x3FFE ) && (bits64) ( extractFloatx80Frac( a )<<1 )
               ) {
                return
                    packFloatx80( aSign, 0x3FFF, LIT64( 0x8000000000000000 ) );
            }
            break;
         case float_round_down:
            return
                  aSign ?
                      packFloatx80( 1, 0x3FFF, LIT64( 0x8000000000000000 ) )
                : packFloatx80( 0, 0, 0 );
         case float_round_up:
            return
                  aSign ? packFloatx80( 1, 0, 0 )
                : packFloatx80( 0, 0x3FFF, LIT64( 0x8000000000000000 ) );
        }
        return packFloatx80( aSign, 0, 0 );
    }
    lastBitMask = 1;
    lastBitMask <<= 0x403E - aExp;
    roundBitsMask = lastBitMask - 1;
    z = a;
    roundingMode = roundData->mode;
    if ( roundingMode == float_round_nearest_even ) {
        z.low += lastBitMask>>1;
        if ( ( z.low & roundBitsMask ) == 0 ) z.low &= ~ lastBitMask;
    }
    else if ( roundingMode != float_round_to_zero ) {
        if ( extractFloatx80Sign( z ) ^ ( roundingMode == float_round_up ) ) {
            z.low += roundBitsMask;
        }
    }
    z.low &= ~ roundBitsMask;
    if ( z.low == 0 ) {
        ++z.high;
        z.low = LIT64( 0x8000000000000000 );
    }
    if ( z.low != a.low ) roundData->exception |= float_flag_inexact;
    return z;

}
/*
-------------------------------------------------------------------------------
Returns the result of adding the absolute values of the extended double-
precision floating-point values `a' and `b'.  If `zSign' is true, the sum
is negated before being returned.  `zSign' is ignored if the result is a
NaN.  The addition is performed according to the IEC/IEEE Standard for
Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
static floatx80 addFloatx80Sigs( struct roundingData *roundData, floatx80 a, floatx80 b, flag zSign )
{
    int32 aExp, bExp, zExp;
    bits64 aSig, bSig, zSig0, zSig1;
    int32 expDiff;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    expDiff = aExp - bExp;
    if ( 0 < expDiff ) {
        if ( aExp == 0x7FFF ) {
            if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b );
            return a;
        }
        if ( bExp == 0 ) --expDiff;
        shift64ExtraRightJamming( bSig, 0, expDiff, &bSig, &zSig1 );
        zExp = aExp;
    }
    else if ( expDiff < 0 ) {
        if ( bExp == 0x7FFF ) {
            if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
            return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
        }
        if ( aExp == 0 ) ++expDiff;
        shift64ExtraRightJamming( aSig, 0, - expDiff, &aSig, &zSig1 );
        zExp = bExp;
    }
    else {
        if ( aExp == 0x7FFF ) {
            if ( (bits64) ( ( aSig | bSig )<<1 ) ) {
                return propagateFloatx80NaN( a, b );
            }
            return a;
        }
        zSig1 = 0;
        zSig0 = aSig + bSig;
        if ( aExp == 0 ) {
            normalizeFloatx80Subnormal( zSig0, &zExp, &zSig0 );
            goto roundAndPack;
        }
        zExp = aExp;
        goto shiftRight1;
    }
    zSig0 = aSig + bSig;
    if ( (sbits64) zSig0 < 0 ) goto roundAndPack;
 shiftRight1:
    shift64ExtraRightJamming( zSig0, zSig1, 1, &zSig0, &zSig1 );
    zSig0 |= LIT64( 0x8000000000000000 );
    ++zExp;
 roundAndPack:
    return roundAndPackFloatx80( roundData, zSign, zExp, zSig0, zSig1 );

}
/*
-------------------------------------------------------------------------------
Returns the result of subtracting the absolute values of the extended
double-precision floating-point values `a' and `b'.  If `zSign' is true,
the difference is negated before being returned.  `zSign' is ignored if the
result is a NaN.  The subtraction is performed according to the IEC/IEEE
Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
static floatx80 subFloatx80Sigs( struct roundingData *roundData, floatx80 a, floatx80 b, flag zSign )
{
    int32 aExp, bExp, zExp;
    bits64 aSig, bSig, zSig0, zSig1;
    int32 expDiff;
    floatx80 z;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    expDiff = aExp - bExp;
    if ( 0 < expDiff ) goto aExpBigger;
    if ( expDiff < 0 ) goto bExpBigger;
    if ( aExp == 0x7FFF ) {
        if ( (bits64) ( ( aSig | bSig )<<1 ) ) {
            return propagateFloatx80NaN( a, b );
        }
        roundData->exception |= float_flag_invalid;
        z.low = floatx80_default_nan_low;
        z.high = floatx80_default_nan_high;
        z.__padding = 0;
        return z;
    }
    if ( aExp == 0 ) {
        aExp = 1;
        bExp = 1;
    }
    zSig1 = 0;
    if ( bSig < aSig ) goto aBigger;
    if ( aSig < bSig ) goto bBigger;
    return packFloatx80( roundData->mode == float_round_down, 0, 0 );
 bExpBigger:
    if ( bExp == 0x7FFF ) {
        if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
        return packFloatx80( zSign ^ 1, 0x7FFF, LIT64( 0x8000000000000000 ) );
    }
    if ( aExp == 0 ) ++expDiff;
    shift128RightJamming( aSig, 0, - expDiff, &aSig, &zSig1 );
 bBigger:
    sub128( bSig, 0, aSig, zSig1, &zSig0, &zSig1 );
    zExp = bExp;
    zSign ^= 1;
    goto normalizeRoundAndPack;
 aExpBigger:
    if ( aExp == 0x7FFF ) {
        if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b );
        return a;
    }
    if ( bExp == 0 ) --expDiff;
    shift128RightJamming( bSig, 0, expDiff, &bSig, &zSig1 );
 aBigger:
    sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 );
    zExp = aExp;
 normalizeRoundAndPack:
    return
        normalizeRoundAndPackFloatx80( roundData, zSign, zExp, zSig0, zSig1 );

}

/*
-------------------------------------------------------------------------------
Returns the result of adding the extended double-precision floating-point
values `a' and `b'.  The operation is performed according to the IEC/IEEE
Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
floatx80 floatx80_add( struct roundingData *roundData, floatx80 a, floatx80 b )
{
    flag aSign, bSign;

    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign == bSign ) {
        return addFloatx80Sigs( roundData, a, b, aSign );
    }
    else {
        return subFloatx80Sigs( roundData, a, b, aSign );
    }

}

/*
-------------------------------------------------------------------------------
Returns the result of subtracting the extended double-precision floating-
point values `a' and `b'.  The operation is performed according to the
IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
floatx80 floatx80_sub( struct roundingData *roundData, floatx80 a, floatx80 b )
{
    flag aSign, bSign;

    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign == bSign ) {
        return subFloatx80Sigs( roundData, a, b, aSign );
    }
    else {
        return addFloatx80Sigs( roundData, a, b, aSign );
    }

}
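/*
-------------------------------------------------------------------------------
Illustrative note (added): floatx80 values carry an explicit integer bit in
bit 63 of `low', so 1.0 is { high = 0x3fff, low = 0x8000000000000000 }.  A
hypothetical harness call, assuming the struct layout used by this file
(unnamed fields are zero-initialized by the designated initializers):
-------------------------------------------------------------------------------
*/
#if 0
static void demo_floatx80_add( void )
{
    struct roundingData roundData = { 0 };
    floatx80 one = { .high = 0x3fff, .low = LIT64( 0x8000000000000000 ) };
    floatx80 two = { .high = 0x4000, .low = LIT64( 0x8000000000000000 ) };
    floatx80 r;

    roundData.mode = float_round_nearest_even;
    r = floatx80_add( &roundData, one, two );
    /* r.high == 0x4000 and r.low == LIT64( 0xc000000000000000 ), i.e. 3.0 */
}
#endif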
/*
-------------------------------------------------------------------------------
Returns the result of multiplying the extended double-precision floating-
point values `a' and `b'.  The operation is performed according to the
IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
floatx80 floatx80_mul( struct roundingData *roundData, floatx80 a, floatx80 b )
{
    flag aSign, bSign, zSign;
    int32 aExp, bExp, zExp;
    bits64 aSig, bSig, zSig0, zSig1;
    floatx80 z;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    bSign = extractFloatx80Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FFF ) {
        if (    (bits64) ( aSig<<1 )
             || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) {
            return propagateFloatx80NaN( a, b );
        }
        if ( ( bExp | bSig ) == 0 ) goto invalid;
        return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
    }
    if ( bExp == 0x7FFF ) {
        if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
        if ( ( aExp | aSig ) == 0 ) {
 invalid:
            roundData->exception |= float_flag_invalid;
            z.low = floatx80_default_nan_low;
            z.high = floatx80_default_nan_high;
            z.__padding = 0;
            return z;
        }
        return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 );
        normalizeFloatx80Subnormal( aSig, &aExp, &aSig );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) return packFloatx80( zSign, 0, 0 );
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    zExp = aExp + bExp - 0x3FFE;
    mul64To128( aSig, bSig, &zSig0, &zSig1 );
    if ( 0 < (sbits64) zSig0 ) {
        shortShift128Left( zSig0, zSig1, 1, &zSig0, &zSig1 );
        --zExp;
    }
    return roundAndPackFloatx80( roundData, zSign, zExp, zSig0, zSig1 );

}
/*
-------------------------------------------------------------------------------
Returns the result of dividing the extended double-precision floating-point
value `a' by the corresponding value `b'.  The operation is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
floatx80 floatx80_div( struct roundingData *roundData, floatx80 a, floatx80 b )
{
    flag aSign, bSign, zSign;
    int32 aExp, bExp, zExp;
    bits64 aSig, bSig, zSig0, zSig1;
    bits64 rem0, rem1, rem2, term0, term1, term2;
    floatx80 z;

    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    bSign = extractFloatx80Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FFF ) {
        if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b );
        if ( bExp == 0x7FFF ) {
            if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
            goto invalid;
        }
        return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
    }
    if ( bExp == 0x7FFF ) {
        if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
        return packFloatx80( zSign, 0, 0 );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
            if ( ( aExp | aSig ) == 0 ) {
 invalid:
                roundData->exception |= float_flag_invalid;
                z.low = floatx80_default_nan_low;
                z.high = floatx80_default_nan_high;
                z.__padding = 0;
                return z;
            }
            roundData->exception |= float_flag_divbyzero;
            return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
        }
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 );
        normalizeFloatx80Subnormal( aSig, &aExp, &aSig );
    }
    zExp = aExp - bExp + 0x3FFE;
    rem1 = 0;
    if ( bSig <= aSig ) {
        shift128Right( aSig, 0, 1, &aSig, &rem1 );
        ++zExp;
    }
    zSig0 = estimateDiv128To64( aSig, rem1, bSig );
    mul64To128( bSig, zSig0, &term0, &term1 );
    sub128( aSig, rem1, term0, term1, &rem0, &rem1 );
    while ( (sbits64) rem0 < 0 ) {
        --zSig0;
        add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
    }
    zSig1 = estimateDiv128To64( rem1, 0, bSig );
    if ( (bits64) ( zSig1<<1 ) <= 8 ) {
        mul64To128( bSig, zSig1, &term1, &term2 );
        sub128( rem1, 0, term1, term2, &rem1, &rem2 );
        while ( (sbits64) rem1 < 0 ) {
            --zSig1;
            add128( rem1, rem2, 0, bSig, &rem1, &rem2 );
        }
        zSig1 |= ( ( rem1 | rem2 ) != 0 );
    }
    return roundAndPackFloatx80( roundData, zSign, zExp, zSig0, zSig1 );

}
/*
-------------------------------------------------------------------------------
Returns the remainder of the extended double-precision floating-point value
`a' with respect to the corresponding value `b'.  The operation is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
floatx80 floatx80_rem( struct roundingData *roundData, floatx80 a, floatx80 b )
{
    flag aSign, bSign, zSign;
    int32 aExp, bExp, expDiff;
    bits64 aSig0, aSig1, bSig;
    bits64 q, term0, term1, alternateASig0, alternateASig1;
    floatx80 z;

    aSig0 = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    bSign = extractFloatx80Sign( b );
    if ( aExp == 0x7FFF ) {
        if (    (bits64) ( aSig0<<1 )
             || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) {
            return propagateFloatx80NaN( a, b );
        }
        goto invalid;
    }
    if ( bExp == 0x7FFF ) {
        if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
        return a;
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
 invalid:
            roundData->exception |= float_flag_invalid;
            z.low = floatx80_default_nan_low;
            z.high = floatx80_default_nan_high;
            z.__padding = 0;
            return z;
        }
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( (bits64) ( aSig0<<1 ) == 0 ) return a;
        normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 );
    }
    bSig |= LIT64( 0x8000000000000000 );
    zSign = aSign;
    expDiff = aExp - bExp;
    aSig1 = 0;
    if ( expDiff < 0 ) {
        if ( expDiff < -1 ) return a;
        shift128Right( aSig0, 0, 1, &aSig0, &aSig1 );
        expDiff = 0;
    }
    q = ( bSig <= aSig0 );
    if ( q ) aSig0 -= bSig;
    expDiff -= 64;
    while ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig0, aSig1, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        mul64To128( bSig, q, &term0, &term1 );
        sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        shortShift128Left( aSig0, aSig1, 62, &aSig0, &aSig1 );
        expDiff -= 62;
    }
    expDiff += 64;
    if ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig0, aSig1, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        q >>= 64 - expDiff;
        mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
        sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        shortShift128Left( 0, bSig, 64 - expDiff, &term0, &term1 );
        while ( le128( term0, term1, aSig0, aSig1 ) ) {
            ++q;
            sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        }
    }
    else {
        term1 = 0;
        term0 = bSig;
    }
    sub128( term0, term1, aSig0, aSig1, &alternateASig0, &alternateASig1 );
    if (    lt128( alternateASig0, alternateASig1, aSig0, aSig1 )
         || (    eq128( alternateASig0, alternateASig1, aSig0, aSig1 )
              && ( q & 1 ) )
       ) {
        aSig0 = alternateASig0;
        aSig1 = alternateASig1;
        zSign = ! zSign;
    }
    return
        normalizeRoundAndPackFloatx80(
            roundData, zSign, bExp + expDiff, aSig0, aSig1 );

}
/*
-------------------------------------------------------------------------------
Returns the square root of the extended double-precision floating-point
value `a'.  The operation is performed according to the IEC/IEEE Standard
for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
floatx80 floatx80_sqrt( struct roundingData *roundData, floatx80 a )
{
    flag aSign;
    int32 aExp, zExp;
    bits64 aSig0, aSig1, zSig0, zSig1;
    bits64 rem0, rem1, rem2, rem3, term0, term1, term2, term3;
    bits64 shiftedRem0, shiftedRem1;
    floatx80 z;

    aSig0 = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( (bits64) ( aSig0<<1 ) ) return propagateFloatx80NaN( a, a );
        if ( ! aSign ) return a;
        goto invalid;
    }
    if ( aSign ) {
        if ( ( aExp | aSig0 ) == 0 ) return a;
 invalid:
        roundData->exception |= float_flag_invalid;
        z.low = floatx80_default_nan_low;
        z.high = floatx80_default_nan_high;
        z.__padding = 0;
        return z;
    }
    if ( aExp == 0 ) {
        if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 );
        normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 );
    }
    zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFF;
    zSig0 = estimateSqrt32( aExp, aSig0>>32 );
    zSig0 <<= 31;
    aSig1 = 0;
    shift128Right( aSig0, 0, ( aExp & 1 ) + 2, &aSig0, &aSig1 );
    zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0 ) + zSig0 + 4;
    if ( 0 <= (sbits64) zSig0 ) zSig0 = LIT64( 0xFFFFFFFFFFFFFFFF );
    shortShift128Left( aSig0, aSig1, 2, &aSig0, &aSig1 );
    mul64To128( zSig0, zSig0, &term0, &term1 );
    sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 );
    while ( (sbits64) rem0 < 0 ) {
        --zSig0;
        shortShift128Left( 0, zSig0, 1, &term0, &term1 );
        term1 |= 1;
        add128( rem0, rem1, term0, term1, &rem0, &rem1 );
    }
    shortShift128Left( rem0, rem1, 63, &shiftedRem0, &shiftedRem1 );
    zSig1 = estimateDiv128To64( shiftedRem0, shiftedRem1, zSig0 );
    if ( (bits64) ( zSig1<<1 ) <= 10 ) {
        if ( zSig1 == 0 ) zSig1 = 1;
        mul64To128( zSig0, zSig1, &term1, &term2 );
        shortShift128Left( term1, term2, 1, &term1, &term2 );
        sub128( rem1, 0, term1, term2, &rem1, &rem2 );
        mul64To128( zSig1, zSig1, &term2, &term3 );
        sub192( rem1, rem2, 0, 0, term2, term3, &rem1, &rem2, &rem3 );
        while ( (sbits64) rem1 < 0 ) {
            --zSig1;
            shortShift192Left( 0, zSig0, zSig1, 1, &term1, &term2, &term3 );
            term3 |= 1;
            add192(
                rem1, rem2, rem3, term1, term2, term3, &rem1, &rem2, &rem3 );
        }
        zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 );
    }
    return roundAndPackFloatx80( roundData, 0, zExp, zSig0, zSig1 );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the extended double-precision floating-point value `a' is
equal to the corresponding value `b', and 0 otherwise.  The comparison is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic.
-------------------------------------------------------------------------------
*/
flag floatx80_eq( floatx80 a, floatx80 b )
{
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (    floatx80_is_signaling_nan( a )
             || floatx80_is_signaling_nan( b ) ) {
            float_raise( float_flag_invalid );
        }
        return 0;
    }
    return
           ( a.low == b.low )
        && (    ( a.high == b.high )
             || (    ( a.low == 0 )
                  && ( (bits16) ( ( a.high | b.high )<<1 ) == 0 ) )
           );

}
/*
-------------------------------------------------------------------------------
Returns 1 if the extended double-precision floating-point value `a' is less
than or equal to the corresponding value `b', and 0 otherwise.  The
comparison is performed according to the IEC/IEEE Standard for Binary
Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag floatx80_le( floatx80 a, floatx80 b )
{
    flag aSign, bSign;

    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            || (    ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 == 0 );
    }
    return
          aSign ? le128( b.high, b.low, a.high, a.low )
        : le128( a.high, a.low, b.high, b.low );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the extended double-precision floating-point value `a' is less
than the corresponding value `b', and 0 otherwise.  The comparison is
performed according to the IEC/IEEE Standard for Binary Floating-point
Arithmetic.
-------------------------------------------------------------------------------
*/
flag floatx80_lt( floatx80 a, floatx80 b )
{
    flag aSign, bSign;

    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            && (    ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 != 0 );
    }
    return
          aSign ? lt128( b.high, b.low, a.high, a.low )
        : lt128( a.high, a.low, b.high, b.low );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the extended double-precision floating-point value `a' is equal
to the corresponding value `b', and 0 otherwise.  The invalid exception is
raised if either operand is a NaN.  Otherwise, the comparison is performed
according to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag floatx80_eq_signaling( floatx80 a, floatx80 b )
{
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        float_raise( float_flag_invalid );
        return 0;
    }
    return
           ( a.low == b.low )
        && (    ( a.high == b.high )
             || (    ( a.low == 0 )
                  && ( (bits16) ( ( a.high | b.high )<<1 ) == 0 ) )
           );

}

/*
-------------------------------------------------------------------------------
Returns 1 if the extended double-precision floating-point value `a' is less
than or equal to the corresponding value `b', and 0 otherwise.  Quiet NaNs
do not cause an exception.  Otherwise, the comparison is performed according
to the IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag floatx80_le_quiet( floatx80 a, floatx80 b )
{
    flag aSign, bSign;

    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        /* Do nothing, even if NaN as we're quiet */
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            || (    ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 == 0 );
    }
    return
          aSign ? le128( b.high, b.low, a.high, a.low )
        : le128( a.high, a.low, b.high, b.low );

}
/*
-------------------------------------------------------------------------------
Returns 1 if the extended double-precision floating-point value `a' is less
than the corresponding value `b', and 0 otherwise.  Quiet NaNs do not cause
an exception.  Otherwise, the comparison is performed according to the
IEC/IEEE Standard for Binary Floating-point Arithmetic.
-------------------------------------------------------------------------------
*/
flag floatx80_lt_quiet( floatx80 a, floatx80 b )
{
    flag aSign, bSign;

    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (bits64) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        /* Do nothing, even if NaN as we're quiet */
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        return
               aSign
            && (    ( ( (bits16) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 != 0 );
    }
    return
          aSign ? lt128( b.high, b.low, a.high, a.low )
        : lt128( a.high, a.low, b.high, b.low );

}

#endif
linux-master
arch/arm/nwfpe/softfloat.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999
    (c) Philip Blundell, 2001

    Direct questions, comments to Scott Bambrough <[email protected]>
*/

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"

float32 float32_exp(float32 Fm);
float32 float32_ln(float32 Fm);
float32 float32_sin(float32 rFm);
float32 float32_cos(float32 rFm);
float32 float32_arcsin(float32 rFm);
float32 float32_arctan(float32 rFm);
float32 float32_log(float32 rFm);
float32 float32_tan(float32 rFm);
float32 float32_arccos(float32 rFm);
float32 float32_pow(float32 rFn, float32 rFm);
float32 float32_pol(float32 rFn, float32 rFm);

static float32 float32_rsf(struct roundingData *roundData, float32 rFn, float32 rFm)
{
	return float32_sub(roundData, rFm, rFn);
}

static float32 float32_rdv(struct roundingData *roundData, float32 rFn, float32 rFm)
{
	return float32_div(roundData, rFm, rFn);
}

static float32 (*const dyadic_single[16])(struct roundingData *, float32 rFn, float32 rFm) = {
	[ADF_CODE >> 20] = float32_add,
	[MUF_CODE >> 20] = float32_mul,
	[SUF_CODE >> 20] = float32_sub,
	[RSF_CODE >> 20] = float32_rsf,
	[DVF_CODE >> 20] = float32_div,
	[RDF_CODE >> 20] = float32_rdv,
	[RMF_CODE >> 20] = float32_rem,
	[FML_CODE >> 20] = float32_mul,
	[FDV_CODE >> 20] = float32_div,
	[FRD_CODE >> 20] = float32_rdv,
};

static float32 float32_mvf(struct roundingData *roundData, float32 rFm)
{
	return rFm;
}

static float32 float32_mnf(struct roundingData *roundData, float32 rFm)
{
	return rFm ^ 0x80000000;
}

static float32 float32_abs(struct roundingData *roundData, float32 rFm)
{
	return rFm & 0x7fffffff;
}

static float32 (*const monadic_single[16])(struct roundingData *, float32 rFm) = {
	[MVF_CODE >> 20] = float32_mvf,
	[MNF_CODE >> 20] = float32_mnf,
	[ABS_CODE >> 20] = float32_abs,
	[RND_CODE >> 20] = float32_round_to_int,
	[URD_CODE >> 20] = float32_round_to_int,
	[SQT_CODE >> 20] = float32_sqrt,
	[NRM_CODE >> 20] = float32_mvf,
};

unsigned int SingleCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd)
{
	FPA11 *fpa11 = GET_FPA11();
	float32 rFm;
	unsigned int Fm, opc_mask_shift;

	Fm = getFm(opcode);
	if (CONSTANT_FM(opcode)) {
		rFm = getSingleConstant(Fm);
	} else if (fpa11->fType[Fm] == typeSingle) {
		rFm = fpa11->fpreg[Fm].fSingle;
	} else {
		return 0;
	}

	opc_mask_shift = (opcode & MASK_ARITHMETIC_OPCODE) >> 20;
	if (!MONADIC_INSTRUCTION(opcode)) {
		unsigned int Fn = getFn(opcode);
		float32 rFn;

		if (fpa11->fType[Fn] == typeSingle &&
		    dyadic_single[opc_mask_shift]) {
			rFn = fpa11->fpreg[Fn].fSingle;
			rFd->fSingle = dyadic_single[opc_mask_shift](roundData, rFn, rFm);
		} else {
			return 0;
		}
	} else {
		if (monadic_single[opc_mask_shift]) {
			rFd->fSingle = monadic_single[opc_mask_shift](roundData, rFm);
		} else {
			return 0;
		}
	}

	return 1;
}
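/*
 * Illustrative note (added): the handler tables above are indexed by the
 * arithmetic-opcode field in bits 20..23, so dispatch is one masked shift.
 * A hypothetical sketch of the lookup SingleCPDO performs:
 */
#if 0
static void demo_single_dispatch(unsigned int opcode)
{
	unsigned int idx = (opcode & MASK_ARITHMETIC_OPCODE) >> 20;

	/* for an ADF (add) opcode, idx == ADF_CODE >> 20 and
	   dyadic_single[idx] == float32_add; unimplemented slots are
	   NULL pointers, which SingleCPDO reports as failure */
}
#endif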
linux-master
arch/arm/nwfpe/single_cpdo.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999
    (c) Philip Blundell, 2001

    Direct questions, comments to Scott Bambrough <[email protected]>
*/

#include "fpa11.h"
#include "fpopcode.h"

#include "fpmodule.h"
#include "fpmodule.inl"

#include <linux/compiler.h>
#include <linux/string.h>

/* Reset the FPA11 chip.  Called to initialize and reset the emulator. */
static void resetFPA11(void)
{
	int i;
	FPA11 *fpa11 = GET_FPA11();

	/* initialize the register type array */
	for (i = 0; i <= 7; i++) {
		fpa11->fType[i] = typeNone;
	}

	/* FPSR: set system id to FP_EMULATOR, set AC, clear all other bits */
	fpa11->fpsr = FP_EMULATOR | BIT_AC;
}

int8 SetRoundingMode(const unsigned int opcode)
{
	switch (opcode & MASK_ROUNDING_MODE) {
	default:
	case ROUND_TO_NEAREST:
		return float_round_nearest_even;

	case ROUND_TO_PLUS_INFINITY:
		return float_round_up;

	case ROUND_TO_MINUS_INFINITY:
		return float_round_down;

	case ROUND_TO_ZERO:
		return float_round_to_zero;
	}
}

int8 SetRoundingPrecision(const unsigned int opcode)
{
#ifdef CONFIG_FPE_NWFPE_XP
	switch (opcode & MASK_ROUNDING_PRECISION) {
	case ROUND_SINGLE:
		return 32;

	case ROUND_DOUBLE:
		return 64;

	case ROUND_EXTENDED:
		return 80;

	default:
		return 80;
	}
#endif
	return 80;
}

void nwfpe_init_fpa(union fp_state *fp)
{
	FPA11 *fpa11 = (FPA11 *)fp;
#ifdef NWFPE_DEBUG
	printk("NWFPE: setting up state.\n");
#endif
	memset(fpa11, 0, sizeof(FPA11));
	resetFPA11();
	fpa11->initflag = 1;
}

/* Emulate the instruction in the opcode. */
unsigned int EmulateAll(unsigned int opcode)
{
	unsigned int code;

#ifdef NWFPE_DEBUG
	printk("NWFPE: emulating opcode %08x\n", opcode);
#endif
	code = opcode & 0x00000f00;
	if (code == 0x00000100 || code == 0x00000200) {
		/* For coprocessor 1 or 2 (FPA11) */
		code = opcode & 0x0e000000;
		if (code == 0x0e000000) {
			if (opcode & 0x00000010) {
				/* Emulate conversion opcodes. */
				/* Emulate register transfer opcodes. */
				/* Emulate comparison opcodes. */
				return EmulateCPRT(opcode);
			} else {
				/* Emulate monadic arithmetic opcodes. */
				/* Emulate dyadic arithmetic opcodes. */
				return EmulateCPDO(opcode);
			}
		} else if (code == 0x0c000000) {
			/* Emulate load/store opcodes. */
			/* Emulate load/store multiple opcodes. */
			return EmulateCPDT(opcode);
		}
	}

	/* Invalid instruction detected.  Return FALSE. */
	return 0;
}
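/*
 * Illustrative note (added): EmulateAll() keys off two fields of the ARM
 * coprocessor instruction: bits 8..11 select the coprocessor (1 or 2 for
 * the FPA11) and bits 24..27 split CPDO/CPRT (0xe) from CPDT (0xc), with
 * bit 4 separating register transfers from data operations.  A
 * hypothetical sketch of that decode:
 */
#if 0
static void demo_decode(unsigned int opcode)
{
	if ((opcode & 0x00000f00) == 0x00000100 ||
	    (opcode & 0x00000f00) == 0x00000200) {
		if ((opcode & 0x0e000000) == 0x0e000000) {
			/* CPDO if bit 4 is clear, CPRT if it is set */
		} else if ((opcode & 0x0e000000) == 0x0c000000) {
			/* CPDT: load/store (multiple) */
		}
	}
}
#endif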
linux-master
arch/arm/nwfpe/fpa11.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    NetWinder Floating Point Emulator
    (c) Rebel.com, 1998-1999
    (c) Philip Blundell, 1998, 2001

    Direct questions, comments to Scott Bambrough <[email protected]>
*/

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"
#include "fpmodule.h"
#include "fpmodule.inl"

#include <linux/uaccess.h>

static inline void loadSingle(const unsigned int Fn, const unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	fpa11->fType[Fn] = typeSingle;
	get_user(fpa11->fpreg[Fn].fSingle, pMem);
}

static inline void loadDouble(const unsigned int Fn, const unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	unsigned int *p;
	p = (unsigned int *) &fpa11->fpreg[Fn].fDouble;
	fpa11->fType[Fn] = typeDouble;
#ifdef __ARMEB__
	get_user(p[0], &pMem[0]);	/* sign & exponent */
	get_user(p[1], &pMem[1]);
#else
	get_user(p[0], &pMem[1]);
	get_user(p[1], &pMem[0]);	/* sign & exponent */
#endif
}

#ifdef CONFIG_FPE_NWFPE_XP
static inline void loadExtended(const unsigned int Fn, const unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	unsigned int *p;
	p = (unsigned int *) &fpa11->fpreg[Fn].fExtended;
	fpa11->fType[Fn] = typeExtended;
	get_user(p[0], &pMem[0]);	/* sign & exponent */
#ifdef __ARMEB__
	get_user(p[1], &pMem[1]);	/* ms bits */
	get_user(p[2], &pMem[2]);	/* ls bits */
#else
	get_user(p[1], &pMem[2]);	/* ls bits */
	get_user(p[2], &pMem[1]);	/* ms bits */
#endif
}
#endif

static inline void loadMultiple(const unsigned int Fn, const unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	register unsigned int *p;
	unsigned long x;

	p = (unsigned int *) &(fpa11->fpreg[Fn]);
	get_user(x, &pMem[0]);
	fpa11->fType[Fn] = (x >> 14) & 0x00000003;

	switch (fpa11->fType[Fn]) {
	case typeSingle:
	case typeDouble:
		{
			get_user(p[0], &pMem[2]);	/* Single */
			get_user(p[1], &pMem[1]);	/* double msw */
			p[2] = 0;	/* empty */
		}
		break;

#ifdef CONFIG_FPE_NWFPE_XP
	case typeExtended:
		{
			get_user(p[1], &pMem[2]);
			get_user(p[2], &pMem[1]);	/* msw */
			p[0] = (x & 0x80003fff);
		}
		break;
#endif
	}
}

static inline void storeSingle(struct roundingData *roundData, const unsigned int Fn, unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	union {
		float32 f;
		unsigned int i[1];
	} val;

	switch (fpa11->fType[Fn]) {
	case typeDouble:
		val.f = float64_to_float32(roundData, fpa11->fpreg[Fn].fDouble);
		break;

#ifdef CONFIG_FPE_NWFPE_XP
	case typeExtended:
		val.f = floatx80_to_float32(roundData, fpa11->fpreg[Fn].fExtended);
		break;
#endif

	default:
		val.f = fpa11->fpreg[Fn].fSingle;
	}

	put_user(val.i[0], pMem);
}

static inline void storeDouble(struct roundingData *roundData, const unsigned int Fn, unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	union {
		float64 f;
		unsigned int i[2];
	} val;

	switch (fpa11->fType[Fn]) {
	case typeSingle:
		val.f = float32_to_float64(fpa11->fpreg[Fn].fSingle);
		break;

#ifdef CONFIG_FPE_NWFPE_XP
	case typeExtended:
		val.f = floatx80_to_float64(roundData, fpa11->fpreg[Fn].fExtended);
		break;
#endif

	default:
		val.f = fpa11->fpreg[Fn].fDouble;
	}

#ifdef __ARMEB__
	put_user(val.i[0], &pMem[0]);	/* msw */
	put_user(val.i[1], &pMem[1]);	/* lsw */
#else
	put_user(val.i[1], &pMem[0]);	/* msw */
	put_user(val.i[0], &pMem[1]);	/* lsw */
#endif
}

#ifdef CONFIG_FPE_NWFPE_XP
static inline void storeExtended(const unsigned int Fn, unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	union {
		floatx80 f;
		unsigned int i[3];
	} val;

	switch (fpa11->fType[Fn]) {
	case typeSingle:
		val.f = float32_to_floatx80(fpa11->fpreg[Fn].fSingle);
		break;

	case typeDouble:
		val.f = float64_to_floatx80(fpa11->fpreg[Fn].fDouble);
		break;

	default:
		val.f = fpa11->fpreg[Fn].fExtended;
	}

	put_user(val.i[0], &pMem[0]);	/* sign & exp */
#ifdef __ARMEB__
	put_user(val.i[1], &pMem[1]);	/* msw */
	put_user(val.i[2], &pMem[2]);
#else
	put_user(val.i[1], &pMem[2]);
	put_user(val.i[2], &pMem[1]);	/* msw */
#endif
}
#endif

static inline void storeMultiple(const unsigned int Fn, unsigned int __user *pMem)
{
	FPA11 *fpa11 = GET_FPA11();
	register unsigned int nType, *p;

	p = (unsigned int *) &(fpa11->fpreg[Fn]);
	nType = fpa11->fType[Fn];

	switch (nType) {
	case typeSingle:
	case typeDouble:
		{
			put_user(p[0], &pMem[2]);	/* single */
			put_user(p[1], &pMem[1]);	/* double msw */
			put_user(nType << 14, &pMem[0]);
		}
		break;

#ifdef CONFIG_FPE_NWFPE_XP
	case typeExtended:
		{
			put_user(p[2], &pMem[1]);	/* msw */
			put_user(p[1], &pMem[2]);
			put_user((p[0] & 0x80003fff) | (nType << 14), &pMem[0]);
		}
		break;
#endif
	}
}

unsigned int PerformLDF(const unsigned int opcode)
{
	unsigned int __user *pBase, *pAddress, *pFinal;
	unsigned int nRc = 1, write_back = WRITE_BACK(opcode);

	pBase = (unsigned int __user *) readRegister(getRn(opcode));
	if (REG_PC == getRn(opcode)) {
		pBase += 2;
		write_back = 0;
	}

	pFinal = pBase;
	if (BIT_UP_SET(opcode))
		pFinal += getOffset(opcode);
	else
		pFinal -= getOffset(opcode);

	if (PREINDEXED(opcode))
		pAddress = pFinal;
	else
		pAddress = pBase;

	switch (opcode & MASK_TRANSFER_LENGTH) {
	case TRANSFER_SINGLE:
		loadSingle(getFd(opcode), pAddress);
		break;
	case TRANSFER_DOUBLE:
		loadDouble(getFd(opcode), pAddress);
		break;
#ifdef CONFIG_FPE_NWFPE_XP
	case TRANSFER_EXTENDED:
		loadExtended(getFd(opcode), pAddress);
		break;
#endif
	default:
		nRc = 0;
	}

	if (write_back)
		writeRegister(getRn(opcode), (unsigned long) pFinal);
	return nRc;
}

unsigned int PerformSTF(const unsigned int opcode)
{
	unsigned int __user *pBase, *pAddress, *pFinal;
	unsigned int nRc = 1, write_back = WRITE_BACK(opcode);
	struct roundingData roundData;

	roundData.mode = SetRoundingMode(opcode);
	roundData.precision = SetRoundingPrecision(opcode);
	roundData.exception = 0;

	pBase = (unsigned int __user *) readRegister(getRn(opcode));
	if (REG_PC == getRn(opcode)) {
		pBase += 2;
		write_back = 0;
	}

	pFinal = pBase;
	if (BIT_UP_SET(opcode))
		pFinal += getOffset(opcode);
	else
		pFinal -= getOffset(opcode);

	if (PREINDEXED(opcode))
		pAddress = pFinal;
	else
		pAddress = pBase;

	switch (opcode & MASK_TRANSFER_LENGTH) {
	case TRANSFER_SINGLE:
		storeSingle(&roundData, getFd(opcode), pAddress);
		break;
	case TRANSFER_DOUBLE:
		storeDouble(&roundData, getFd(opcode), pAddress);
		break;
#ifdef CONFIG_FPE_NWFPE_XP
	case TRANSFER_EXTENDED:
		storeExtended(getFd(opcode), pAddress);
		break;
#endif
	default:
		nRc = 0;
	}

	if (roundData.exception)
		float_raise(roundData.exception);

	if (write_back)
		writeRegister(getRn(opcode), (unsigned long) pFinal);
	return nRc;
}

unsigned int PerformLFM(const unsigned int opcode)
{
	unsigned int __user *pBase, *pAddress, *pFinal;
	unsigned int i, Fd, write_back = WRITE_BACK(opcode);

	pBase = (unsigned int __user *) readRegister(getRn(opcode));
	if (REG_PC == getRn(opcode)) {
		pBase += 2;
		write_back = 0;
	}

	pFinal = pBase;
	if (BIT_UP_SET(opcode))
		pFinal += getOffset(opcode);
	else
		pFinal -= getOffset(opcode);

	if (PREINDEXED(opcode))
		pAddress = pFinal;
	else
		pAddress = pBase;

	Fd = getFd(opcode);
	for (i = getRegisterCount(opcode); i > 0; i--) {
		loadMultiple(Fd, pAddress);
		pAddress += 3;
		Fd++;
		if (Fd == 8)
			Fd = 0;
	}

	if (write_back)
		writeRegister(getRn(opcode), (unsigned long) pFinal);
	return 1;
}

unsigned int PerformSFM(const unsigned int opcode)
{
	unsigned int __user *pBase, *pAddress, *pFinal;
	unsigned int i, Fd, write_back = WRITE_BACK(opcode);

	pBase = (unsigned int __user *) readRegister(getRn(opcode));
	if (REG_PC == getRn(opcode)) {
		pBase += 2;
		write_back = 0;
	}

	pFinal = pBase;
	if (BIT_UP_SET(opcode))
		pFinal += getOffset(opcode);
	else
		pFinal -= getOffset(opcode);

	if (PREINDEXED(opcode))
		pAddress = pFinal;
	else
		pAddress = pBase;

	Fd = getFd(opcode);
	for (i = getRegisterCount(opcode); i > 0; i--) {
		storeMultiple(Fd, pAddress);
		pAddress += 3;
		Fd++;
		if (Fd == 8)
			Fd = 0;
	}

	if (write_back)
		writeRegister(getRn(opcode), (unsigned long) pFinal);
	return 1;
}

unsigned int EmulateCPDT(const unsigned int opcode)
{
	unsigned int nRc = 0;

	if (LDF_OP(opcode)) {
		nRc = PerformLDF(opcode);
	} else if (LFM_OP(opcode)) {
		nRc = PerformLFM(opcode);
	} else if (STF_OP(opcode)) {
		nRc = PerformSTF(opcode);
	} else if (SFM_OP(opcode)) {
		nRc = PerformSFM(opcode);
	} else {
		nRc = 0;
	}

	return nRc;
}
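/*
 * Illustrative note (added): all four transfer routines share the same
 * base/offset arithmetic: the offset is counted in words, a PC-relative
 * base reads as the instruction address + 8 (hence pBase += 2 words), and
 * write-back is suppressed when the base register is PC.  A hypothetical
 * sketch of the address selection they perform:
 */
#if 0
static void demo_transfer_address(unsigned int opcode)
{
	unsigned int __user *pBase, *pFinal, *pAddress;

	pBase = (unsigned int __user *) readRegister(getRn(opcode));
	if (REG_PC == getRn(opcode))
		pBase += 2;	/* PC reads as the instruction address + 8 */

	pFinal = BIT_UP_SET(opcode) ? pBase + getOffset(opcode)
				    : pBase - getOffset(opcode);
	pAddress = PREINDEXED(opcode) ? pFinal : pBase;
	/* the transfer uses pAddress; pFinal is written back if requested */
}
#endif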
linux-master
arch/arm/nwfpe/fpa11_cpdt.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999

    Direct questions, comments to Scott Bambrough <[email protected]>
*/

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"

floatx80 floatx80_exp(floatx80 Fm);
floatx80 floatx80_ln(floatx80 Fm);
floatx80 floatx80_sin(floatx80 rFm);
floatx80 floatx80_cos(floatx80 rFm);
floatx80 floatx80_arcsin(floatx80 rFm);
floatx80 floatx80_arctan(floatx80 rFm);
floatx80 floatx80_log(floatx80 rFm);
floatx80 floatx80_tan(floatx80 rFm);
floatx80 floatx80_arccos(floatx80 rFm);
floatx80 floatx80_pow(floatx80 rFn, floatx80 rFm);
floatx80 floatx80_pol(floatx80 rFn, floatx80 rFm);

static floatx80 floatx80_rsf(struct roundingData *roundData, floatx80 rFn, floatx80 rFm)
{
	return floatx80_sub(roundData, rFm, rFn);
}

static floatx80 floatx80_rdv(struct roundingData *roundData, floatx80 rFn, floatx80 rFm)
{
	return floatx80_div(roundData, rFm, rFn);
}

static floatx80 (*const dyadic_extended[16])(struct roundingData *, floatx80 rFn, floatx80 rFm) = {
	[ADF_CODE >> 20] = floatx80_add,
	[MUF_CODE >> 20] = floatx80_mul,
	[SUF_CODE >> 20] = floatx80_sub,
	[RSF_CODE >> 20] = floatx80_rsf,
	[DVF_CODE >> 20] = floatx80_div,
	[RDF_CODE >> 20] = floatx80_rdv,
	[RMF_CODE >> 20] = floatx80_rem,

	/* strictly, these opcodes should not be implemented */
	[FML_CODE >> 20] = floatx80_mul,
	[FDV_CODE >> 20] = floatx80_div,
	[FRD_CODE >> 20] = floatx80_rdv,
};

static floatx80 floatx80_mvf(struct roundingData *roundData, floatx80 rFm)
{
	return rFm;
}

static floatx80 floatx80_mnf(struct roundingData *roundData, floatx80 rFm)
{
	rFm.high ^= 0x8000;
	return rFm;
}

static floatx80 floatx80_abs(struct roundingData *roundData, floatx80 rFm)
{
	rFm.high &= 0x7fff;
	return rFm;
}

static floatx80 (*const monadic_extended[16])(struct roundingData *, floatx80 rFm) = {
	[MVF_CODE >> 20] = floatx80_mvf,
	[MNF_CODE >> 20] = floatx80_mnf,
	[ABS_CODE >> 20] = floatx80_abs,
	[RND_CODE >> 20] = floatx80_round_to_int,
	[URD_CODE >> 20] = floatx80_round_to_int,
	[SQT_CODE >> 20] = floatx80_sqrt,
	[NRM_CODE >> 20] = floatx80_mvf,
};

unsigned int ExtendedCPDO(struct roundingData *roundData, const unsigned int opcode, FPREG * rFd)
{
	FPA11 *fpa11 = GET_FPA11();
	floatx80 rFm;
	unsigned int Fm, opc_mask_shift;

	Fm = getFm(opcode);
	if (CONSTANT_FM(opcode)) {
		rFm = getExtendedConstant(Fm);
	} else {
		switch (fpa11->fType[Fm]) {
		case typeSingle:
			rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle);
			break;

		case typeDouble:
			rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble);
			break;

		case typeExtended:
			rFm = fpa11->fpreg[Fm].fExtended;
			break;

		default:
			return 0;
		}
	}

	opc_mask_shift = (opcode & MASK_ARITHMETIC_OPCODE) >> 20;
	if (!MONADIC_INSTRUCTION(opcode)) {
		unsigned int Fn = getFn(opcode);
		floatx80 rFn;

		switch (fpa11->fType[Fn]) {
		case typeSingle:
			rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle);
			break;

		case typeDouble:
			rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble);
			break;

		case typeExtended:
			rFn = fpa11->fpreg[Fn].fExtended;
			break;

		default:
			return 0;
		}

		if (dyadic_extended[opc_mask_shift]) {
			rFd->fExtended = dyadic_extended[opc_mask_shift](roundData, rFn, rFm);
		} else {
			return 0;
		}
	} else {
		if (monadic_extended[opc_mask_shift]) {
			rFd->fExtended = monadic_extended[opc_mask_shift](roundData, rFm);
		} else {
			return 0;
		}
	}

	return 1;
}
linux-master
arch/arm/nwfpe/extended_cpdo.c
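The two function-pointer tables above dispatch on bits 20-23 of the opcode (the arithmetic-opcode field), with NULL entries rejecting unimplemented operations. A minimal self-contained sketch of that pattern, with made-up opcode values standing in for ADF_CODE and friends:

#include <stdio.h>

#define MASK_ARITH_OPCODE 0x00f00000	/* bits 20-23, as in the emulator */
#define ADD_CODE 0x00000000		/* illustrative values only */
#define MUL_CODE 0x00100000

static double op_add(double a, double b) { return a + b; }
static double op_mul(double a, double b) { return a * b; }

/* NULL slots reject unimplemented opcodes, exactly as ExtendedCPDO does */
static double (*const dyadic[16])(double, double) = {
	[ADD_CODE >> 20] = op_add,
	[MUL_CODE >> 20] = op_mul,
};

int main(void)
{
	unsigned int opcode = MUL_CODE;
	unsigned int idx = (opcode & MASK_ARITH_OPCODE) >> 20;

	if (dyadic[idx])
		printf("result = %f\n", dyadic[idx](3.0, 4.0));
	else
		printf("unimplemented opcode\n");
	return 0;
}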
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999

    Direct questions, comments to Scott Bambrough <[email protected]>
*/

#include "fpa11.h"
#include "softfloat.h"
#include "fpopcode.h"
#include "fpsr.h"
#include "fpmodule.h"
#include "fpmodule.inl"

#ifdef CONFIG_FPE_NWFPE_XP
const floatx80 floatx80Constant[] = {
	{ .high = 0x0000, .low = 0x0000000000000000ULL},/* extended 0.0 */
	{ .high = 0x3fff, .low = 0x8000000000000000ULL},/* extended 1.0 */
	{ .high = 0x4000, .low = 0x8000000000000000ULL},/* extended 2.0 */
	{ .high = 0x4000, .low = 0xc000000000000000ULL},/* extended 3.0 */
	{ .high = 0x4001, .low = 0x8000000000000000ULL},/* extended 4.0 */
	{ .high = 0x4001, .low = 0xa000000000000000ULL},/* extended 5.0 */
	{ .high = 0x3ffe, .low = 0x8000000000000000ULL},/* extended 0.5 */
	{ .high = 0x4002, .low = 0xa000000000000000ULL},/* extended 10.0 */
};
#endif

const float64 float64Constant[] = {
	0x0000000000000000ULL,	/* double 0.0 */
	0x3ff0000000000000ULL,	/* double 1.0 */
	0x4000000000000000ULL,	/* double 2.0 */
	0x4008000000000000ULL,	/* double 3.0 */
	0x4010000000000000ULL,	/* double 4.0 */
	0x4014000000000000ULL,	/* double 5.0 */
	0x3fe0000000000000ULL,	/* double 0.5 */
	0x4024000000000000ULL	/* double 10.0 */
};

const float32 float32Constant[] = {
	0x00000000,		/* single 0.0 */
	0x3f800000,		/* single 1.0 */
	0x40000000,		/* single 2.0 */
	0x40400000,		/* single 3.0 */
	0x40800000,		/* single 4.0 */
	0x40a00000,		/* single 5.0 */
	0x3f000000,		/* single 0.5 */
	0x41200000		/* single 10.0 */
};
linux-master
arch/arm/nwfpe/fpopcode.c
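The float64Constant table above is simply the IEEE-754 bit patterns of the eight FPA-addressable constants. A quick host-side check (standalone, not kernel code; assumes IEEE-754 doubles on the host) reproduces the table from plain C doubles:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const double values[] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5, 10.0 };
	size_t i;

	for (i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
		uint64_t bits;

		/* type-pun safely via memcpy */
		memcpy(&bits, &values[i], sizeof(bits));
		printf("%4.1f -> 0x%016llx\n", values[i],
		       (unsigned long long)bits);
	}
	return 0;	/* 3.0 prints as 0x4008000000000000, matching the table */
}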
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    NetWinder Floating Point Emulator
    (c) Rebel.com, 1998-1999
    (c) Philip Blundell, 1998-1999

    Direct questions, comments to Scott Bambrough <[email protected]>
*/

#include "fpa11.h"

#include <linux/module.h>
#include <linux/moduleparam.h>

/* XXX */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/init.h>

#include <asm/thread_notify.h>

#include "softfloat.h"
#include "fpopcode.h"
#include "fpmodule.h"
#include "fpa11.inl"

/* kernel symbols required for signal handling */
#ifdef CONFIG_FPE_NWFPE_XP
#define NWFPE_BITS "extended"
#else
#define NWFPE_BITS "double"
#endif

#ifdef MODULE
void fp_send_sig(unsigned long sig, struct task_struct *p, int priv);
#else
#define fp_send_sig	send_sig
#define kern_fp_enter	fp_enter

extern char fpe_type[];
#endif

static int nwfpe_notify(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;

	if (cmd == THREAD_NOTIFY_FLUSH)
		nwfpe_init_fpa(&thread->fpstate);

	return NOTIFY_DONE;
}

static struct notifier_block nwfpe_notifier_block = {
	.notifier_call = nwfpe_notify,
};

/* kernel function prototypes required */
void fp_setup(void);

/* external declarations for saved kernel symbols */
extern void (*kern_fp_enter)(void);

/* Original value of fp_enter from kernel before patched by fpe_init. */
static void (*orig_fp_enter)(void);

/* forward declarations */
extern void nwfpe_enter(void);

static int __init fpe_init(void)
{
	if (sizeof(FPA11) > sizeof(union fp_state)) {
		pr_err("nwfpe: bad structure size\n");
		return -EINVAL;
	}

	if (sizeof(FPREG) != 12) {
		pr_err("nwfpe: bad register size\n");
		return -EINVAL;
	}

	if (fpe_type[0] && strcmp(fpe_type, "nwfpe"))
		return 0;

	/* Display title, version and copyright information. */
	pr_info("NetWinder Floating Point Emulator V0.97 ("
		NWFPE_BITS " precision)\n");

	thread_register_notifier(&nwfpe_notifier_block);

	/* Save pointer to the old FP handler and then patch ourselves in */
	orig_fp_enter = kern_fp_enter;
	kern_fp_enter = nwfpe_enter;

	return 0;
}

static void __exit fpe_exit(void)
{
	thread_unregister_notifier(&nwfpe_notifier_block);
	/* Restore the values we saved earlier. */
	kern_fp_enter = orig_fp_enter;
}

/*
ScottB:  November 4, 1998

Moved this function out of softfloat-specialize into fpmodule.c.
This effectively isolates all the changes required for integrating with the
Linux kernel into fpmodule.c.  Porting to NetBSD should only require modifying
fpmodule.c to integrate with the NetBSD kernel (I hope!).

[1/1/99: Not quite true any more unfortunately.  There is Linux-specific
code to access data in user space in some other source files at the
moment (grep for get_user / put_user calls).  --philb]

This function is called by the SoftFloat routines to raise a floating
point exception.  We check the trap enable byte in the FPSR, and raise
a SIGFPE exception if necessary.  If not the relevant bits in the
cumulative exceptions flag byte are set and we return.
*/

#ifdef CONFIG_DEBUG_USER
/* By default, ignore inexact errors as there are far too many of them to log */
static int debug = ~BIT_IXC;
#endif

void float_raise(signed char flags)
{
	register unsigned int fpsr, cumulativeTraps;

#ifdef CONFIG_DEBUG_USER
	if (flags & debug)
		printk(KERN_DEBUG
		       "NWFPE: %s[%d] takes exception %08x at %ps from %08lx\n",
		       current->comm, current->pid, flags,
		       __builtin_return_address(0), GET_USERREG()->ARM_pc);
#endif

	/* Read fpsr and initialize the cumulativeTraps. */
	fpsr = readFPSR();
	cumulativeTraps = 0;

	/* For each type of exception, the cumulative trap exception bit is
	   only set if the corresponding trap enable bit is not set. */
	if ((!(fpsr & BIT_IXE)) && (flags & BIT_IXC))
		cumulativeTraps |= BIT_IXC;
	if ((!(fpsr & BIT_UFE)) && (flags & BIT_UFC))
		cumulativeTraps |= BIT_UFC;
	if ((!(fpsr & BIT_OFE)) && (flags & BIT_OFC))
		cumulativeTraps |= BIT_OFC;
	if ((!(fpsr & BIT_DZE)) && (flags & BIT_DZC))
		cumulativeTraps |= BIT_DZC;
	if ((!(fpsr & BIT_IOE)) && (flags & BIT_IOC))
		cumulativeTraps |= BIT_IOC;

	/* Set the cumulative exceptions flags. */
	if (cumulativeTraps)
		writeFPSR(fpsr | cumulativeTraps);

	/* Raise an exception if necessary. */
	if (fpsr & (flags << 16))
		fp_send_sig(SIGFPE, current, 1);
}

module_init(fpe_init);
module_exit(fpe_exit);

MODULE_AUTHOR("Scott Bambrough <[email protected]>");
MODULE_DESCRIPTION("NWFPE floating point emulator (" NWFPE_BITS " precision)");
MODULE_LICENSE("GPL");

#ifdef CONFIG_DEBUG_USER
module_param(debug, int, 0644);
#endif
linux-master
arch/arm/nwfpe/fpmodule.c
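float_raise() above applies one rule per exception type: the cumulative flag is set only when the matching trap-enable bit is clear, and SIGFPE is raised only when it is set. A standalone sketch of that rule with illustrative bit positions (the real BIT_* values live in fpsr.h; the enable-bits-16-above-the-flags layout is implied by the (flags << 16) test):

#include <stdio.h>

#define CUMULATIVE_MASK	0x1f	/* five exception flags, illustrative */
#define ENABLE_SHIFT	16	/* enables sit 16 bits above the flags */

static unsigned int apply_exceptions(unsigned int fpsr, unsigned int flags,
				     int *raise_signal)
{
	unsigned int enables = (fpsr >> ENABLE_SHIFT) & CUMULATIVE_MASK;

	/* accumulate only the exceptions whose traps are disabled */
	fpsr |= flags & CUMULATIVE_MASK & ~enables;
	/* trap if any raised exception has its enable bit set */
	*raise_signal = (enables & flags & CUMULATIVE_MASK) != 0;
	return fpsr;
}

int main(void)
{
	int trap;
	/* enable bit 0's trap, then raise exceptions 0 and 1 */
	unsigned int fpsr = apply_exceptions(0x1 << ENABLE_SHIFT, 0x3, &trap);

	printf("fpsr=%#x trap=%d\n", fpsr, trap);	/* bit 1 latched, trap=1 */
	return 0;
}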
// SPDX-License-Identifier: GPL-2.0
/*
 * Device Tree support for Allwinner A1X SoCs
 *
 * Copyright (C) 2012 Maxime Ripard
 *
 * Maxime Ripard <[email protected]>
 *
 */

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/of_clk.h>
#include <linux/platform_device.h>
#include <linux/reset/sunxi.h>

#include <asm/mach/arch.h>
#include <asm/secure_cntvoff.h>

static const char * const sunxi_board_dt_compat[] = {
	"allwinner,sun4i-a10",
	"allwinner,sun5i-a10s",
	"allwinner,sun5i-a13",
	"allwinner,sun5i-r8",
	"nextthing,gr8",
	NULL,
};

DT_MACHINE_START(SUNXI_DT, "Allwinner sun4i/sun5i Families")
	.dt_compat	= sunxi_board_dt_compat,
MACHINE_END

static const char * const sun6i_board_dt_compat[] = {
	"allwinner,sun6i-a31",
	"allwinner,sun6i-a31s",
	NULL,
};

static void __init sun6i_timer_init(void)
{
	of_clk_init(NULL);
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		sun6i_reset_init();
	timer_probe();
}

DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
	.init_time	= sun6i_timer_init,
	.dt_compat	= sun6i_board_dt_compat,
MACHINE_END

static const char * const sun7i_board_dt_compat[] = {
	"allwinner,sun7i-a20",
	NULL,
};

DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
	.dt_compat	= sun7i_board_dt_compat,
MACHINE_END

static const char * const sun8i_board_dt_compat[] = {
	"allwinner,sun8i-a23",
	"allwinner,sun8i-a33",
	"allwinner,sun8i-h2-plus",
	"allwinner,sun8i-h3",
	"allwinner,sun8i-r40",
	"allwinner,sun8i-v3",
	"allwinner,sun8i-v3s",
	NULL,
};

DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
	.init_time	= sun6i_timer_init,
	.dt_compat	= sun8i_board_dt_compat,
MACHINE_END

static void __init sun8i_a83t_cntvoff_init(void)
{
#ifdef CONFIG_SMP
	secure_cntvoff_init();
#endif
}

static const char * const sun8i_a83t_cntvoff_board_dt_compat[] = {
	"allwinner,sun8i-a83t",
	NULL,
};

DT_MACHINE_START(SUN8I_A83T_CNTVOFF_DT, "Allwinner A83t board")
	.init_early	= sun8i_a83t_cntvoff_init,
	.init_time	= sun6i_timer_init,
	.dt_compat	= sun8i_a83t_cntvoff_board_dt_compat,
MACHINE_END

static const char * const sun9i_board_dt_compat[] = {
	"allwinner,sun9i-a80",
	NULL,
};

DT_MACHINE_START(SUN9I_DT, "Allwinner sun9i Family")
	.dt_compat	= sun9i_board_dt_compat,
MACHINE_END

static const char * const suniv_board_dt_compat[] = {
	"allwinner,suniv-f1c100s",
	NULL,
};

DT_MACHINE_START(SUNIV_DT, "Allwinner suniv Family")
	.dt_compat	= suniv_board_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-sunxi/sunxi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * SMP support for Allwinner SoCs
 *
 * Copyright (C) 2013 Maxime Ripard
 *
 * Maxime Ripard <[email protected]>
 *
 * Based on code
 *  Copyright (C) 2012-2013 Allwinner Ltd.
 *
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/smp.h>

#define CPUCFG_CPU_PWR_CLAMP_STATUS_REG(cpu)	((cpu) * 0x40 + 0x64)
#define CPUCFG_CPU_RST_CTRL_REG(cpu)		(((cpu) + 1) * 0x40)
#define CPUCFG_CPU_CTRL_REG(cpu)		(((cpu) + 1) * 0x40 + 0x04)
#define CPUCFG_CPU_STATUS_REG(cpu)		(((cpu) + 1) * 0x40 + 0x08)
#define CPUCFG_GEN_CTRL_REG			0x184
#define CPUCFG_PRIVATE0_REG			0x1a4
#define CPUCFG_PRIVATE1_REG			0x1a8
#define CPUCFG_DBG_CTL0_REG			0x1e0
#define CPUCFG_DBG_CTL1_REG			0x1e4

#define PRCM_CPU_PWROFF_REG			0x100
#define PRCM_CPU_PWR_CLAMP_REG(cpu)		(((cpu) * 4) + 0x140)

static void __iomem *cpucfg_membase;
static void __iomem *prcm_membase;

static DEFINE_SPINLOCK(cpu_lock);

static void __init sun6i_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "allwinner,sun6i-a31-prcm");
	if (!node) {
		pr_err("Missing A31 PRCM node in the device tree\n");
		return;
	}

	prcm_membase = of_iomap(node, 0);
	of_node_put(node);
	if (!prcm_membase) {
		pr_err("Couldn't map A31 PRCM registers\n");
		return;
	}

	node = of_find_compatible_node(NULL, NULL,
				       "allwinner,sun6i-a31-cpuconfig");
	if (!node) {
		pr_err("Missing A31 CPU config node in the device tree\n");
		return;
	}

	cpucfg_membase = of_iomap(node, 0);
	of_node_put(node);
	if (!cpucfg_membase)
		pr_err("Couldn't map A31 CPU config registers\n");
}

static int sun6i_smp_boot_secondary(unsigned int cpu,
				    struct task_struct *idle)
{
	u32 reg;
	int i;

	if (!(prcm_membase && cpucfg_membase))
		return -EFAULT;

	spin_lock(&cpu_lock);

	/* Set CPU boot address */
	writel(__pa_symbol(secondary_startup),
	       cpucfg_membase + CPUCFG_PRIVATE0_REG);

	/* Assert the CPU core in reset */
	writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));

	/* Assert the L1 cache in reset */
	reg = readl(cpucfg_membase + CPUCFG_GEN_CTRL_REG);
	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);

	/* Disable external debug access */
	reg = readl(cpucfg_membase + CPUCFG_DBG_CTL1_REG);
	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);

	/* Power up the CPU */
	for (i = 0; i <= 8; i++)
		writel(0xff >> i, prcm_membase + PRCM_CPU_PWR_CLAMP_REG(cpu));
	mdelay(10);

	/* Clear CPU power-off gating */
	reg = readl(prcm_membase + PRCM_CPU_PWROFF_REG);
	writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
	mdelay(1);

	/* Deassert the CPU core reset */
	writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));

	/* Enable back the external debug accesses */
	reg = readl(cpucfg_membase + CPUCFG_DBG_CTL1_REG);
	writel(reg | BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);

	spin_unlock(&cpu_lock);

	return 0;
}

static const struct smp_operations sun6i_smp_ops __initconst = {
	.smp_prepare_cpus	= sun6i_smp_prepare_cpus,
	.smp_boot_secondary	= sun6i_smp_boot_secondary,
};
CPU_METHOD_OF_DECLARE(sun6i_a31_smp, "allwinner,sun6i-a31", &sun6i_smp_ops);

static void __init sun8i_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "allwinner,sun8i-a23-prcm");
	if (!node) {
		pr_err("Missing A23 PRCM node in the device tree\n");
		return;
	}

	prcm_membase = of_iomap(node, 0);
	of_node_put(node);
	if (!prcm_membase) {
		pr_err("Couldn't map A23 PRCM registers\n");
		return;
	}

	node = of_find_compatible_node(NULL, NULL,
				       "allwinner,sun8i-a23-cpuconfig");
	if (!node) {
		pr_err("Missing A23 CPU config node in the device tree\n");
		return;
	}

	cpucfg_membase = of_iomap(node, 0);
	of_node_put(node);
	if (!cpucfg_membase)
		pr_err("Couldn't map A23 CPU config registers\n");
}

static int sun8i_smp_boot_secondary(unsigned int cpu,
				    struct task_struct *idle)
{
	u32 reg;

	if (!(prcm_membase && cpucfg_membase))
		return -EFAULT;

	spin_lock(&cpu_lock);

	/* Set CPU boot address */
	writel(__pa_symbol(secondary_startup),
	       cpucfg_membase + CPUCFG_PRIVATE0_REG);

	/* Assert the CPU core in reset */
	writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));

	/* Assert the L1 cache in reset */
	reg = readl(cpucfg_membase + CPUCFG_GEN_CTRL_REG);
	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);

	/* Clear CPU power-off gating */
	reg = readl(prcm_membase + PRCM_CPU_PWROFF_REG);
	writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
	mdelay(1);

	/* Deassert the CPU core reset */
	writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));

	spin_unlock(&cpu_lock);

	return 0;
}

static const struct smp_operations sun8i_smp_ops __initconst = {
	.smp_prepare_cpus	= sun8i_smp_prepare_cpus,
	.smp_boot_secondary	= sun8i_smp_boot_secondary,
};
CPU_METHOD_OF_DECLARE(sun8i_a23_smp, "allwinner,sun8i-a23", &sun8i_smp_ops);
linux-master
arch/arm/mach-sunxi/platsmp.c
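The sun6i boot path releases the CPU power clamps gradually: writing 0xff >> i for i = 0..8 walks the clamp register from all-clamped to fully released one bit at a time, limiting inrush current. A standalone sketch of the sequence that loop produces:

#include <stdio.h>

int main(void)
{
	int i;

	/* same staircase sun6i_smp_boot_secondary writes to
	 * PRCM_CPU_PWR_CLAMP_REG: 0xff, 0x7f, 0x3f, ..., 0x01, 0x00 */
	for (i = 0; i <= 8; i++)
		printf("step %d: clamp = 0x%02x\n", i, 0xff >> i);
	return 0;
}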
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Chen-Yu Tsai
 *
 * Chen-Yu Tsai <[email protected]>
 *
 * arch/arm/mach-sunxi/mc_smp.c
 *
 * Based on Allwinner code, arch/arm/mach-exynos/mcpm-exynos.c, and
 * arch/arm/mach-hisi/platmcpm.c
 * Cluster cache enable trampoline code adapted from MCPM framework
 */

#include <linux/arm-cci.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#define SUNXI_CPUS_PER_CLUSTER		4
#define SUNXI_NR_CLUSTERS		2

#define POLL_USEC	100
#define TIMEOUT_USEC	100000

#define CPUCFG_CX_CTRL_REG0(c)		(0x10 * (c))
#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(n)	BIT(n)
#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL	0xf
#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7	BIT(4)
#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15	BIT(0)
#define CPUCFG_CX_CTRL_REG1(c)		(0x10 * (c) + 0x4)
#define CPUCFG_CX_CTRL_REG1_ACINACTM	BIT(0)
#define CPUCFG_CX_STATUS(c)		(0x30 + 0x4 * (c))
#define CPUCFG_CX_STATUS_STANDBYWFI(n)	BIT(16 + (n))
#define CPUCFG_CX_STATUS_STANDBYWFIL2	BIT(0)
#define CPUCFG_CX_RST_CTRL(c)		(0x80 + 0x4 * (c))
#define CPUCFG_CX_RST_CTRL_DBG_SOC_RST	BIT(24)
#define CPUCFG_CX_RST_CTRL_ETM_RST(n)	BIT(20 + (n))
#define CPUCFG_CX_RST_CTRL_ETM_RST_ALL	(0xf << 20)
#define CPUCFG_CX_RST_CTRL_DBG_RST(n)	BIT(16 + (n))
#define CPUCFG_CX_RST_CTRL_DBG_RST_ALL	(0xf << 16)
#define CPUCFG_CX_RST_CTRL_H_RST	BIT(12)
#define CPUCFG_CX_RST_CTRL_L2_RST	BIT(8)
#define CPUCFG_CX_RST_CTRL_CX_RST(n)	BIT(4 + (n))
#define CPUCFG_CX_RST_CTRL_CORE_RST(n)	BIT(n)
#define CPUCFG_CX_RST_CTRL_CORE_RST_ALL	(0xf << 0)

#define PRCM_CPU_PO_RST_CTRL(c)		(0x4 + 0x4 * (c))
#define PRCM_CPU_PO_RST_CTRL_CORE(n)	BIT(n)
#define PRCM_CPU_PO_RST_CTRL_CORE_ALL	0xf
#define PRCM_PWROFF_GATING_REG(c)	(0x100 + 0x4 * (c))
/* The cluster power-off gating bit differs between the A80 and A83T */
#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I	BIT(0)
#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I	BIT(4)
#define PRCM_PWROFF_GATING_REG_CORE(n)	BIT(n)
#define PRCM_PWR_SWITCH_REG(c, cpu)	(0x140 + 0x10 * (c) + 0x4 * (cpu))
#define PRCM_CPU_SOFT_ENTRY_REG		0x164

/* R_CPUCFG registers, specific to sun8i-a83t */
#define R_CPUCFG_CLUSTER_PO_RST_CTRL(c)	(0x30 + (c) * 0x4)
#define R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(n)	BIT(n)
#define R_CPUCFG_CPU_SOFT_ENTRY_REG	0x01a4

#define CPU0_SUPPORT_HOTPLUG_MAGIC0	0xFA50392F
#define CPU0_SUPPORT_HOTPLUG_MAGIC1	0x790DCA3A

static void __iomem *cpucfg_base;
static void __iomem *prcm_base;
static void __iomem *sram_b_smp_base;
static void __iomem *r_cpucfg_base;

extern void sunxi_mc_smp_secondary_startup(void);
extern void sunxi_mc_smp_resume(void);
static bool is_a83t;

static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
{
	struct device_node *node;
	int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
	bool is_compatible;

	node = of_cpu_device_node_get(cpu);

	/* In case of_cpu_device_node_get fails */
	if (!node)
		node = of_get_cpu_node(cpu, NULL);

	if (!node) {
		/*
		 * There's no point in returning an error, since we
		 * would be midway through a core or cluster power sequence.
		 */
		pr_err("%s: Couldn't get CPU cluster %u core %u device node\n",
		       __func__, cluster, core);

		return false;
	}

	is_compatible = of_device_is_compatible(node, "arm,cortex-a15");
	of_node_put(node);
	return is_compatible;
}

static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
				      bool enable)
{
	u32 reg;

	/* control sequence from Allwinner A80 user manual v1.2 PRCM section */
	reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
	if (enable) {
		if (reg == 0x00) {
			pr_debug("power clamp for cluster %u cpu %u already open\n",
				 cluster, cpu);
			return 0;
		}

		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
		writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
	} else {
		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
		udelay(10);
	}

	return 0;
}

static void sunxi_cpu0_hotplug_support_set(bool enable)
{
	if (enable) {
		writel(CPU0_SUPPORT_HOTPLUG_MAGIC0, sram_b_smp_base);
		writel(CPU0_SUPPORT_HOTPLUG_MAGIC1, sram_b_smp_base + 0x4);
	} else {
		writel(0x0, sram_b_smp_base);
		writel(0x0, sram_b_smp_base + 0x4);
	}
}

static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	u32 reg;

	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	/* Set hotplug support magic flags for cpu0 */
	if (cluster == 0 && cpu == 0)
		sunxi_cpu0_hotplug_support_set(true);

	/* assert processor power-on reset */
	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
	reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

	if (is_a83t) {
		/* assert cpu power-on reset */
		reg = readl(r_cpucfg_base +
			    R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		reg &= ~(R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu));
		writel(reg, r_cpucfg_base +
		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		udelay(10);
	}

	/* Cortex-A7: hold L1 reset disable signal low */
	if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
		reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
		reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(cpu);
		writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
	}

	/* assert processor related resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST(cpu);

	/*
	 * Allwinner code also asserts resets for NEON on A15. According
	 * to ARM manuals, asserting power-on reset is sufficient.
	 */
	if (!sunxi_core_is_cortex_a15(cpu, cluster))
		reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* open power switch */
	sunxi_cpu_power_switch_set(cpu, cluster, true);

	/* Handle A83T bit swap */
	if (is_a83t) {
		if (cpu == 0)
			cpu = 4;
	}

	/* clear processor power gate */
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	/* Handle A83T bit swap */
	if (is_a83t) {
		if (cpu == 4)
			cpu = 0;
	}

	/* de-assert processor power-on reset */
	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
	reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

	if (is_a83t) {
		reg = readl(r_cpucfg_base +
			    R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		reg |= R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu);
		writel(reg, r_cpucfg_base +
		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		udelay(10);
	}

	/* de-assert all processor resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg |= CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
	reg |= CPUCFG_CX_RST_CTRL_CORE_RST(cpu);
	if (!sunxi_core_is_cortex_a15(cpu, cluster))
		reg |= CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
	else
		reg |= CPUCFG_CX_RST_CTRL_CX_RST(cpu);	/* NEON */
	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	return 0;
}

static int sunxi_cluster_powerup(unsigned int cluster)
{
	u32 reg;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	/* For A83T, assert cluster cores resets */
	if (is_a83t) {
		reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
		reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;   /* Core Reset    */
		writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
		udelay(10);
	}

	/* assert ACINACTM */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
	reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));

	/* assert cluster processor power-on resets */
	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
	reg &= ~PRCM_CPU_PO_RST_CTRL_CORE_ALL;
	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

	/* assert cluster cores resets */
	if (is_a83t) {
		reg = readl(r_cpucfg_base +
			    R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;
		writel(reg, r_cpucfg_base +
		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
		udelay(10);
	}

	/* assert cluster resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST_ALL;
	reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;

	/*
	 * Allwinner code also asserts resets for NEON on A15. According
	 * to ARM manuals, asserting power-on reset is sufficient.
	 */
	if (!sunxi_core_is_cortex_a15(0, cluster))
		reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST_ALL;

	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* hold L1/L2 reset disable signals low */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
	if (sunxi_core_is_cortex_a15(0, cluster)) {
		/* Cortex-A15: hold L2RSTDISABLE low */
		reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15;
	} else {
		/* Cortex-A7: hold L1RSTDISABLE and L2RSTDISABLE low */
		reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL;
		reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7;
	}
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));

	/* clear cluster power gate */
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	if (is_a83t)
		reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
	else
		reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	/* de-assert cluster resets */
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg |= CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
	reg |= CPUCFG_CX_RST_CTRL_H_RST;
	reg |= CPUCFG_CX_RST_CTRL_L2_RST;
	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* de-assert ACINACTM */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
	reg &= ~CPUCFG_CX_CTRL_REG1_ACINACTM;
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));

	return 0;
}

/*
 * This bit is shared between the initial nocache_trampoline call to
 * enable CCI-400 and proper cluster cache disable before power down.
 */
static void sunxi_cluster_cache_disable_without_axi(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/*
		 * On the Cortex-A15 we need to disable
		 * L2 prefetching before flushing the cache.
		 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3\n"
		"isb\n"
		"dsb"
		: : "r" (0x400));
	}

	/* Flush all cache levels for this cluster. */
	v7_exit_coherency_flush(all);

	/*
	 * Disable cluster-level coherency by masking
	 * incoming snoops and DVM messages:
	 */
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

static int sunxi_mc_smp_cpu_table[SUNXI_NR_CLUSTERS][SUNXI_CPUS_PER_CLUSTER];
int sunxi_mc_smp_first_comer;

static DEFINE_SPINLOCK(boot_lock);

static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster)
{
	int i;

	for (i = 0; i < SUNXI_CPUS_PER_CLUSTER; i++)
		if (sunxi_mc_smp_cpu_table[cluster][i])
			return false;
	return true;
}

static void sunxi_mc_smp_secondary_init(unsigned int cpu)
{
	/* Clear hotplug support magic flags for cpu0 */
	if (cpu == 0)
		sunxi_cpu0_hotplug_support_set(false);
}

static int sunxi_mc_smp_boot_secondary(unsigned int l_cpu,
				       struct task_struct *idle)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (!cpucfg_base)
		return -ENODEV;
	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	if (sunxi_mc_smp_cpu_table[cluster][cpu])
		goto out;

	if (sunxi_mc_smp_cluster_is_down(cluster)) {
		sunxi_mc_smp_first_comer = true;
		sunxi_cluster_powerup(cluster);
	} else {
		sunxi_mc_smp_first_comer = false;
	}

	/* This is read by incoming CPUs with their cache and MMU disabled */
	sync_cache_w(&sunxi_mc_smp_first_comer);

	sunxi_cpu_powerup(cpu, cluster);

out:
	sunxi_mc_smp_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void sunxi_cluster_cache_disable(void)
{
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
	u32 reg;

	pr_debug("%s: cluster %u\n", __func__, cluster);

	sunxi_cluster_cache_disable_without_axi();

	/* last man standing, assert ACINACTM */
	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
	reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
}

static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);

	spin_lock(&boot_lock);
	sunxi_mc_smp_cpu_table[cluster][cpu]--;
	if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		pr_debug("%s: aborting due to a power up request\n",
			 __func__);
		spin_unlock(&boot_lock);
		return;
	} else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n",
		       cluster, cpu);
		BUG();
	}

	last_man = sunxi_mc_smp_cluster_is_down(cluster);
	spin_unlock(&boot_lock);

	gic_cpu_if_down(0);
	if (last_man)
		sunxi_cluster_cache_disable();
	else
		v7_exit_coherency_flush(louis);

	for (;;)
		wfi();
}

static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	u32 reg;
	int gating_bit = cpu;

	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	if (is_a83t && cpu == 0)
		gating_bit = 4;

	/* gate processor power */
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit);
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	/* close power switch */
	sunxi_cpu_power_switch_set(cpu, cluster, false);

	return 0;
}

static int sunxi_cluster_powerdown(unsigned int cluster)
{
	u32 reg;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= SUNXI_NR_CLUSTERS)
		return -EINVAL;

	/* assert cluster resets or system will hang */
	pr_debug("%s: assert cluster reset\n", __func__);
	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
	reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
	reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;
	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

	/* gate cluster power */
	pr_debug("%s: gate cluster power\n", __func__);
	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	if (is_a83t)
		reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
	else
		reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
	udelay(20);

	return 0;
}

static int sunxi_mc_smp_cpu_kill(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	unsigned int tries, count;
	int ret = 0;
	u32 reg;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	/* This should never happen */
	if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
		    cpu >= SUNXI_CPUS_PER_CLUSTER))
		return 0;

	/* wait for CPU core to die and enter WFI */
	count = TIMEOUT_USEC / POLL_USEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		spin_unlock_irq(&boot_lock);
		usleep_range(POLL_USEC / 2, POLL_USEC);
		spin_lock_irq(&boot_lock);

		/*
		 * If the user turns off a bunch of cores at the same
		 * time, the kernel might call cpu_kill before some of
		 * them are ready. This is because boot_lock serializes
		 * both cpu_die and cpu_kill callbacks. Either one could
		 * run first. We should wait for cpu_die to complete.
		 */
		if (sunxi_mc_smp_cpu_table[cluster][cpu])
			continue;

		reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
		if (reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu))
			break;
	}

	if (tries >= count) {
		ret = ETIMEDOUT;
		goto out;
	}

	/* power down CPU core */
	sunxi_cpu_powerdown(cpu, cluster);

	if (!sunxi_mc_smp_cluster_is_down(cluster))
		goto out;

	/* wait for cluster L2 WFI */
	ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
				 reg & CPUCFG_CX_STATUS_STANDBYWFIL2,
				 POLL_USEC, TIMEOUT_USEC);
	if (ret) {
		/*
		 * Ignore timeout on the cluster. Leaving the cluster on
		 * will not affect system execution, just use a bit more
		 * power. But returning an error here will only confuse
		 * the user as the CPU has already been shutdown.
		 */
		ret = 0;
		goto out;
	}

	/* Power down cluster */
	sunxi_cluster_powerdown(cluster);

out:
	spin_unlock_irq(&boot_lock);
	pr_debug("%s: cluster %u cpu %u powerdown: %d\n",
		 __func__, cluster, cpu, ret);
	return !ret;
}

static bool sunxi_mc_smp_cpu_can_disable(unsigned int cpu)
{
	/* CPU0 hotplug not handled for sun8i-a83t */
	if (is_a83t)
		if (cpu == 0)
			return false;
	return true;
}
#endif

static const struct smp_operations sunxi_mc_smp_smp_ops __initconst = {
	.smp_secondary_init	= sunxi_mc_smp_secondary_init,
	.smp_boot_secondary	= sunxi_mc_smp_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= sunxi_mc_smp_cpu_die,
	.cpu_kill		= sunxi_mc_smp_cpu_kill,
	.cpu_can_disable	= sunxi_mc_smp_cpu_can_disable,
#endif
};

static bool __init sunxi_mc_smp_cpu_table_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return false;
	}
	sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
	return true;
}

/*
 * Adapted from arch/arm/common/mc_smp_entry.c
 *
 * We need the trampoline code to enable CCI-400 on the first cluster
 */
typedef typeof(cpu_reset) phys_reset_t;

static int __init nocache_trampoline(unsigned long __unused)
{
	phys_reset_t phys_reset;

	setup_mm_for_reboot();
	sunxi_cluster_cache_disable_without_axi();

	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
	phys_reset(__pa_symbol(sunxi_mc_smp_resume), false);
	BUG();
}

static int __init sunxi_mc_smp_loopback(void)
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	sunxi_mc_smp_first_comer = true;
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend(0, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	sunxi_mc_smp_first_comer = false;

	return ret;
}

/*
 * This holds any device nodes that we requested resources for,
 * so that we may easily release resources in the error path.
 */
struct sunxi_mc_smp_nodes {
	struct device_node *prcm_node;
	struct device_node *cpucfg_node;
	struct device_node *sram_node;
	struct device_node *r_cpucfg_node;
};

/* This structure holds SoC-specific bits tied to an enable-method string. */
struct sunxi_mc_smp_data {
	const char *enable_method;
	int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
	bool is_a83t;
};

static void __init sunxi_mc_smp_put_nodes(struct sunxi_mc_smp_nodes *nodes)
{
	of_node_put(nodes->prcm_node);
	of_node_put(nodes->cpucfg_node);
	of_node_put(nodes->sram_node);
	of_node_put(nodes->r_cpucfg_node);
	memset(nodes, 0, sizeof(*nodes));
}

static int __init sun9i_a80_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
{
	nodes->prcm_node = of_find_compatible_node(NULL, NULL,
						   "allwinner,sun9i-a80-prcm");
	if (!nodes->prcm_node) {
		pr_err("%s: PRCM not available\n", __func__);
		return -ENODEV;
	}

	nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
						     "allwinner,sun9i-a80-cpucfg");
	if (!nodes->cpucfg_node) {
		pr_err("%s: CPUCFG not available\n", __func__);
		return -ENODEV;
	}

	nodes->sram_node = of_find_compatible_node(NULL, NULL,
						   "allwinner,sun9i-a80-smp-sram");
	if (!nodes->sram_node) {
		pr_err("%s: Secure SRAM not available\n", __func__);
		return -ENODEV;
	}

	return 0;
}

static int __init sun8i_a83t_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
{
	nodes->prcm_node = of_find_compatible_node(NULL, NULL,
						   "allwinner,sun8i-a83t-r-ccu");
	if (!nodes->prcm_node) {
		pr_err("%s: PRCM not available\n", __func__);
		return -ENODEV;
	}

	nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
						     "allwinner,sun8i-a83t-cpucfg");
	if (!nodes->cpucfg_node) {
		pr_err("%s: CPUCFG not available\n", __func__);
		return -ENODEV;
	}

	nodes->r_cpucfg_node = of_find_compatible_node(NULL, NULL,
						       "allwinner,sun8i-a83t-r-cpucfg");
	if (!nodes->r_cpucfg_node) {
		pr_err("%s: RCPUCFG not available\n", __func__);
		return -ENODEV;
	}

	return 0;
}

static const struct sunxi_mc_smp_data sunxi_mc_smp_data[] __initconst = {
	{
		.enable_method	= "allwinner,sun9i-a80-smp",
		.get_smp_nodes	= sun9i_a80_get_smp_nodes,
	},
	{
		.enable_method	= "allwinner,sun8i-a83t-smp",
		.get_smp_nodes	= sun8i_a83t_get_smp_nodes,
		.is_a83t	= true,
	},
};

static int __init sunxi_mc_smp_init(void)
{
	struct sunxi_mc_smp_nodes nodes = { 0 };
	struct device_node *node;
	struct resource res;
	void __iomem *addr;
	int i, ret;

	/*
	 * Don't bother checking the "cpus" node, as an enable-method
	 * property in that node is undocumented.
	 */
	node = of_cpu_device_node_get(0);
	if (!node)
		return -ENODEV;

	/*
	 * We can't actually use the enable-method magic in the kernel.
	 * Our loopback / trampoline code uses the CPU suspend framework,
	 * which requires the identity mapping be available. It would not
	 * yet be available if we used the .init_cpus or .prepare_cpus
	 * callbacks in smp_operations, which we would use if we were to
	 * use CPU_METHOD_OF_DECLARE
	 */
	for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
		ret = of_property_match_string(node, "enable-method",
					       sunxi_mc_smp_data[i].enable_method);
		if (!ret)
			break;
	}

	is_a83t = sunxi_mc_smp_data[i].is_a83t;

	of_node_put(node);
	if (ret)
		return -ENODEV;

	if (!sunxi_mc_smp_cpu_table_init())
		return -EINVAL;

	if (!cci_probed()) {
		pr_err("%s: CCI-400 not available\n", __func__);
		return -ENODEV;
	}

	/* Get needed device tree nodes */
	ret = sunxi_mc_smp_data[i].get_smp_nodes(&nodes);
	if (ret)
		goto err_put_nodes;

	/*
	 * Unfortunately we can not request the I/O region for the PRCM.
	 * It is shared with the PRCM clock.
	 */
	prcm_base = of_iomap(nodes.prcm_node, 0);
	if (!prcm_base) {
		pr_err("%s: failed to map PRCM registers\n", __func__);
		ret = -ENOMEM;
		goto err_put_nodes;
	}

	cpucfg_base = of_io_request_and_map(nodes.cpucfg_node, 0,
					    "sunxi-mc-smp");
	if (IS_ERR(cpucfg_base)) {
		ret = PTR_ERR(cpucfg_base);
		pr_err("%s: failed to map CPUCFG registers: %d\n",
		       __func__, ret);
		goto err_unmap_prcm;
	}

	if (is_a83t) {
		r_cpucfg_base = of_io_request_and_map(nodes.r_cpucfg_node,
						      0, "sunxi-mc-smp");
		if (IS_ERR(r_cpucfg_base)) {
			ret = PTR_ERR(r_cpucfg_base);
			pr_err("%s: failed to map R-CPUCFG registers\n",
			       __func__);
			goto err_unmap_release_cpucfg;
		}
	} else {
		sram_b_smp_base = of_io_request_and_map(nodes.sram_node, 0,
							"sunxi-mc-smp");
		if (IS_ERR(sram_b_smp_base)) {
			ret = PTR_ERR(sram_b_smp_base);
			pr_err("%s: failed to map secure SRAM\n", __func__);
			goto err_unmap_release_cpucfg;
		}
	}

	/* Configure CCI-400 for boot cluster */
	ret = sunxi_mc_smp_loopback();
	if (ret) {
		pr_err("%s: failed to configure boot cluster: %d\n",
		       __func__, ret);
		goto err_unmap_release_sram_rcpucfg;
	}

	/* We don't need the device nodes anymore */
	sunxi_mc_smp_put_nodes(&nodes);

	/* Set the hardware entry point address */
	if (is_a83t)
		addr = r_cpucfg_base + R_CPUCFG_CPU_SOFT_ENTRY_REG;
	else
		addr = prcm_base + PRCM_CPU_SOFT_ENTRY_REG;
	writel(__pa_symbol(sunxi_mc_smp_secondary_startup), addr);

	/* Actually enable multi cluster SMP */
	smp_set_ops(&sunxi_mc_smp_smp_ops);

	pr_info("sunxi multi cluster SMP support installed\n");

	return 0;

err_unmap_release_sram_rcpucfg:
	if (is_a83t) {
		iounmap(r_cpucfg_base);
		of_address_to_resource(nodes.r_cpucfg_node, 0, &res);
	} else {
		iounmap(sram_b_smp_base);
		of_address_to_resource(nodes.sram_node, 0, &res);
	}
	release_mem_region(res.start, resource_size(&res));
err_unmap_release_cpucfg:
	iounmap(cpucfg_base);
	of_address_to_resource(nodes.cpucfg_node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_unmap_prcm:
	iounmap(prcm_base);
err_put_nodes:
	sunxi_mc_smp_put_nodes(&nodes);
	return ret;
}

early_initcall(sunxi_mc_smp_init);
linux-master
arch/arm/mach-sunxi/mc_smp.c
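mc_smp.c repeatedly splits an MPIDR into a (cluster, core) pair with MPIDR_AFFINITY_LEVEL. A standalone sketch of that decoding, using the 8-bits-per-affinity-level layout the kernel macro assumes (the macro below mirrors the kernel's definition but is re-declared here so the example compiles on its own):

#include <stdio.h>

#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1u << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

int main(void)
{
	unsigned int mpidr = 0x80000102;	/* example: Aff1 = 1, Aff0 = 2 */
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	printf("cluster %u cpu %u\n", cluster, cpu);	/* cluster 1 cpu 2 */
	return 0;
}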
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Realtek RTD1195
 *
 * Copyright (c) 2017-2019 Andreas Färber
 */

#include <linux/memblock.h>
#include <asm/mach/arch.h>

static void __init rtd1195_memblock_remove(phys_addr_t base, phys_addr_t size)
{
	int ret;

	ret = memblock_remove(base, size);
	if (ret)
		pr_err("Failed to remove memblock %pa (%d)\n", &base, ret);
}

static void __init rtd1195_reserve(void)
{
	/* Exclude boot ROM from RAM */
	rtd1195_memblock_remove(0x00000000, 0x0000a800);

	/* Exclude peripheral register spaces from RAM */
	rtd1195_memblock_remove(0x18000000, 0x00070000);
	rtd1195_memblock_remove(0x18100000, 0x01000000);
}

static const char *const rtd1195_dt_compat[] __initconst = {
	"realtek,rtd1195",
	NULL
};

DT_MACHINE_START(rtd1195, "Realtek RTD1195")
	.dt_compat = rtd1195_dt_compat,
	.reserve = rtd1195_reserve,
	.l2c_aux_val = 0x0,
	.l2c_aux_mask = ~0x0,
MACHINE_END
linux-master
arch/arm/mach-realtek/rtd1195.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2013-2014 Broadcom Corporation

#include <linux/init.h>
#include <linux/irqchip.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

/*
 * Storage for debug-macro.S's state.
 *
 * This must be in .data not .bss so that it gets initialized each time the
 * kernel is loaded. The data is declared here rather than debug-macro.S so
 * that multiple inclusions of debug-macro.S point at the same data.
 */
u32 brcmstb_uart_config[3] = {
	/* Debug UART initialization required */
	1,
	/* Debug UART physical address */
	0,
	/* Debug UART virtual address */
	0,
};

static void __init brcmstb_init_irq(void)
{
	irqchip_init();
}

static const char *const brcmstb_match[] __initconst = {
	"brcm,bcm7445",
	"brcm,brcmstb",
	NULL
};

DT_MACHINE_START(BRCMSTB, "Broadcom STB (Flattened Device Tree)")
	.dt_compat	= brcmstb_match,
	.init_irq	= brcmstb_init_irq,
MACHINE_END
linux-master
arch/arm/mach-bcm/brcmstb.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2012-2014 Broadcom Corporation

#include <linux/clocksource.h>
#include <linux/of_address.h>

#include <asm/mach/arch.h>

#include "kona_l2_cache.h"

#define SECWDOG_OFFSET			0x00000000
#define SECWDOG_RESERVED_MASK		0xe2000000
#define SECWDOG_WD_LOAD_FLAG_MASK	0x10000000
#define SECWDOG_EN_MASK			0x08000000
#define SECWDOG_SRSTEN_MASK		0x04000000
#define SECWDOG_CLKS_SHIFT		20
#define SECWDOG_COUNT_SHIFT		0

static void bcm281xx_restart(enum reboot_mode mode, const char *cmd)
{
	uint32_t val;
	void __iomem *base;
	struct device_node *np_wdog;

	np_wdog = of_find_compatible_node(NULL, NULL, "brcm,kona-wdt");
	if (!np_wdog) {
		pr_emerg("Couldn't find brcm,kona-wdt\n");
		return;
	}
	base = of_iomap(np_wdog, 0);
	of_node_put(np_wdog);
	if (!base) {
		pr_emerg("Couldn't map brcm,kona-wdt\n");
		return;
	}

	/* Enable watchdog with short timeout (244us). */
	val = readl(base + SECWDOG_OFFSET);
	val &= SECWDOG_RESERVED_MASK | SECWDOG_WD_LOAD_FLAG_MASK;
	val |= SECWDOG_EN_MASK | SECWDOG_SRSTEN_MASK |
		(0x15 << SECWDOG_CLKS_SHIFT) |
		(0x8 << SECWDOG_COUNT_SHIFT);
	writel(val, base + SECWDOG_OFFSET);

	/* Wait for reset */
	while (1);
}

static void __init bcm281xx_init(void)
{
	kona_l2_cache_init();
}

static const char * const bcm281xx_dt_compat[] = {
	"brcm,bcm11351",	/* Have to use the first number upstreamed */
	NULL,
};

DT_MACHINE_START(BCM281XX_DT, "BCM281xx Broadcom Application Processor")
	.init_machine = bcm281xx_init,
	.restart = bcm281xx_restart,
	.dt_compat = bcm281xx_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-bcm/board_bcm281xx.c
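The restart path programs a count of 0x8 and the comment claims a 244 µs timeout. That is consistent with the watchdog counter ticking at the 32.768 kHz slow clock (presumably what the 0x15 clock-select value picks; the SoC manual would have to confirm that assumption): 8 / 32768 s ≈ 244 µs. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const double slow_clk_hz = 32768.0;	/* assumed tick rate */
	const unsigned int count = 0x8;		/* SECWDOG count field */

	printf("timeout = %.1f us\n", count / slow_clk_hz * 1e6);
	return 0;	/* prints "timeout = 244.1 us" */
}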
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2013 Broadcom Corporation

#include <linux/smp.h>
#include <linux/io.h>
#include <linux/ioport.h>

#include <asm/cacheflush.h>
#include <linux/of_address.h>

#include "bcm_kona_smc.h"

static u32		bcm_smc_buffer_phys;	/* physical address */
static void __iomem	*bcm_smc_buffer;	/* virtual address */

struct bcm_kona_smc_data {
	unsigned service_id;
	unsigned arg0;
	unsigned arg1;
	unsigned arg2;
	unsigned arg3;
	unsigned result;
};

static const struct of_device_id bcm_kona_smc_ids[] __initconst = {
	{.compatible = "brcm,kona-smc"},
	{.compatible = "bcm,kona-smc"}, /* deprecated name */
	{},
};

/* Map in the args buffer area */
int __init bcm_kona_smc_init(void)
{
	struct device_node *node;
	struct resource res;
	int ret;

	/* Read buffer addr and size from the device tree node */
	node = of_find_matching_node(NULL, bcm_kona_smc_ids);
	if (!node)
		return -ENODEV;

	ret = of_address_to_resource(node, 0, &res);
	of_node_put(node);
	if (ret)
		return -EINVAL;

	bcm_smc_buffer = ioremap(res.start, resource_size(&res));
	if (!bcm_smc_buffer)
		return -ENOMEM;
	bcm_smc_buffer_phys = res.start;

	pr_info("Kona Secure API initialized\n");

	return 0;
}

/*
 * int bcm_kona_do_smc(u32 service_id, u32 buffer_addr)
 *
 * Only core 0 can run the secure monitor code.  If an "smc" request
 * is initiated on a different core it must be redirected to core 0
 * for execution.  We rely on the caller to handle this.
 *
 * Each "smc" request supplies a service id and the address of a
 * buffer containing parameters related to the service to be
 * performed.  A flags value defines the behavior of the level 2
 * cache and interrupt handling while the secure monitor executes.
 *
 * Parameters to the "smc" request are passed in r4-r6 as follows:
 *     r4	service id
 *     r5	flags (SEC_ROM_*)
 *     r6	physical address of buffer with other parameters
 *
 * Execution of an "smc" request produces two distinct results.
 *
 * First, the secure monitor call itself (regardless of the specific
 * service request) can succeed, or can produce an error.  When an
 * "smc" request completes this value is found in r12; it should
 * always be SEC_EXIT_NORMAL.
 *
 * In addition, the particular service performed produces a result.
 * The values that should be expected depend on the service.  We
 * therefore return this value to the caller, so it can handle the
 * request result appropriately.  This result value is found in r0
 * when the "smc" request completes.
 */
static int bcm_kona_do_smc(u32 service_id, u32 buffer_phys)
{
	register u32 ip asm("ip");	/* Also called r12 */
	register u32 r0 asm("r0");
	register u32 r4 asm("r4");
	register u32 r5 asm("r5");
	register u32 r6 asm("r6");

	r4 = service_id;
	r5 = 0x3;		/* Keep IRQ and FIQ off in SM */
	r6 = buffer_phys;

	asm volatile (
		/* Make sure we got the registers we want */
		__asmeq("%0", "ip")
		__asmeq("%1", "r0")
		__asmeq("%2", "r4")
		__asmeq("%3", "r5")
		__asmeq("%4", "r6")
		".arch_extension sec\n"
		"	smc    #0\n"
		: "=r" (ip), "=r" (r0)
		: "r" (r4), "r" (r5), "r" (r6)
		: "r1", "r2", "r3", "r7", "lr");

	BUG_ON(ip != SEC_EXIT_NORMAL);

	return r0;
}

/* __bcm_kona_smc() should only run on CPU 0, with pre-emption disabled */
static void __bcm_kona_smc(void *info)
{
	struct bcm_kona_smc_data *data = info;
	u32 __iomem *args = bcm_smc_buffer;

	BUG_ON(smp_processor_id() != 0);
	BUG_ON(!args);

	/* Copy the four 32 bit argument values into the bounce area */
	writel_relaxed(data->arg0, args++);
	writel_relaxed(data->arg1, args++);
	writel_relaxed(data->arg2, args++);
	writel(data->arg3, args);

	/* Flush caches for input data passed to Secure Monitor */
	flush_cache_all();

	/* Trap into Secure Monitor and record the request result */
	data->result = bcm_kona_do_smc(data->service_id, bcm_smc_buffer_phys);
}

unsigned bcm_kona_smc(unsigned service_id, unsigned arg0, unsigned arg1,
		      unsigned arg2, unsigned arg3)
{
	struct bcm_kona_smc_data data;

	data.service_id = service_id;
	data.arg0 = arg0;
	data.arg1 = arg1;
	data.arg2 = arg2;
	data.arg3 = arg3;
	data.result = 0;

	/*
	 * Due to a limitation of the secure monitor, we must use the SMP
	 * infrastructure to forward all secure monitor calls to Core 0.
	 */
	smp_call_function_single(0, __bcm_kona_smc, &data, 1);

	return data.result;
}
linux-master
arch/arm/mach-bcm/bcm_kona_smc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom BCM63138 PMB initialization for secondary CPU(s)
 *
 * Copyright (C) 2015 Broadcom Corporation
 * Author: Florian Fainelli <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/reset/bcm63xx_pmb.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "bcm63xx_smp.h"

/* ARM Control register definitions */
#define CORE_PWR_CTRL_SHIFT	0
#define CORE_PWR_CTRL_MASK	0x3
#define PLL_PWR_ON		BIT(8)
#define PLL_LDO_PWR_ON		BIT(9)
#define PLL_CLAMP_ON		BIT(10)
#define CPU_RESET_N(x)		BIT(13 + (x))
#define NEON_RESET_N		BIT(15)
#define PWR_CTRL_STATUS_SHIFT	28
#define PWR_CTRL_STATUS_MASK	0x3
#define PWR_DOWN_SHIFT		30
#define PWR_DOWN_MASK		0x3

/* CPU Power control register definitions */
#define MEM_PWR_OK		BIT(0)
#define MEM_PWR_ON		BIT(1)
#define MEM_CLAMP_ON		BIT(2)
#define MEM_PWR_OK_STATUS	BIT(4)
#define MEM_PWR_ON_STATUS	BIT(5)
#define MEM_PDA_SHIFT		8
#define MEM_PDA_MASK		0xf
#define MEM_PDA_CPU_MASK	0x1
#define MEM_PDA_NEON_MASK	0xf
#define CLAMP_ON		BIT(15)
#define PWR_OK_SHIFT		16
#define PWR_OK_MASK		0xf
#define PWR_ON_SHIFT		20
#define PWR_CPU_MASK		0x03
#define PWR_NEON_MASK		0x01
#define PWR_ON_MASK		0xf
#define PWR_OK_STATUS_SHIFT	24
#define PWR_OK_STATUS_MASK	0xf
#define PWR_ON_STATUS_SHIFT	28
#define PWR_ON_STATUS_MASK	0xf

#define ARM_CONTROL		0x30
#define ARM_PWR_CONTROL_BASE	0x34
#define ARM_PWR_CONTROL(x)	(ARM_PWR_CONTROL_BASE + (x) * 0x4)
#define ARM_NEON_L2		0x3c

/* Perform a value write, then spin until the value shifted by
 * shift is seen, masked with mask and is different from cond.
 */
static int bpcm_wr_rd_mask(void __iomem *master,
			   unsigned int addr, u32 off, u32 *val,
			   u32 shift, u32 mask, u32 cond)
{
	int ret;

	ret = bpcm_wr(master, addr, off, *val);
	if (ret)
		return ret;

	do {
		ret = bpcm_rd(master, addr, off, val);
		if (ret)
			return ret;

		cpu_relax();
	} while (((*val >> shift) & mask) != cond);

	return ret;
}

/* Global lock to serialize accesses to the PMB registers while we
 * are bringing up the secondary CPU
 */
static DEFINE_SPINLOCK(pmb_lock);

static int bcm63xx_pmb_get_resources(struct device_node *dn,
				     void __iomem **base,
				     unsigned int *cpu,
				     unsigned int *addr)
{
	struct of_phandle_args args;
	int ret;

	*cpu = of_get_cpu_hwid(dn, 0);
	if (*cpu == ~0U) {
		pr_err("CPU is missing a reg node\n");
		return -ENODEV;
	}

	ret = of_parse_phandle_with_args(dn, "resets", "#reset-cells",
					 0, &args);
	if (ret) {
		pr_err("CPU is missing a resets phandle\n");
		return ret;
	}

	if (args.args_count != 2) {
		pr_err("reset-controller does not conform to reset-cells\n");
		return -EINVAL;
	}

	*base = of_iomap(args.np, 0);
	if (!*base) {
		pr_err("failed remapping PMB register\n");
		return -ENOMEM;
	}

	/* We do not need the number of zones */
	*addr = args.args[0];

	return 0;
}

int bcm63xx_pmb_power_on_cpu(struct device_node *dn)
{
	void __iomem *base;
	unsigned int cpu, addr;
	unsigned long flags;
	u32 val, ctrl;
	int ret;

	ret = bcm63xx_pmb_get_resources(dn, &base, &cpu, &addr);
	if (ret)
		return ret;

	/* We would not know how to enable a third and greater CPU */
	WARN_ON(cpu > 1);

	spin_lock_irqsave(&pmb_lock, flags);

	/* Check if the CPU is already on and save the ARM_CONTROL register
	 * value since we will use it later for CPU de-assert once done with
	 * the CPU-specific power sequence
	 */
	ret = bpcm_rd(base, addr, ARM_CONTROL, &ctrl);
	if (ret)
		goto out;

	if (ctrl & CPU_RESET_N(cpu)) {
		pr_info("PMB: CPU%d is already powered on\n", cpu);
		ret = 0;
		goto out;
	}

	/* Power on PLL */
	ret = bpcm_rd(base, addr, ARM_PWR_CONTROL(cpu), &val);
	if (ret)
		goto out;

	val |= (PWR_CPU_MASK << PWR_ON_SHIFT);

	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
			      PWR_ON_STATUS_SHIFT, PWR_CPU_MASK, PWR_CPU_MASK);
	if (ret)
		goto out;

	val |= (PWR_CPU_MASK << PWR_OK_SHIFT);

	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
			      PWR_OK_STATUS_SHIFT, PWR_CPU_MASK, PWR_CPU_MASK);
	if (ret)
		goto out;

	val &= ~CLAMP_ON;

	ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
	if (ret)
		goto out;

	/* Power on CPU<N> RAM */
	val &= ~(MEM_PDA_MASK << MEM_PDA_SHIFT);

	ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
	if (ret)
		goto out;

	val |= MEM_PWR_ON;

	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
			      0, MEM_PWR_ON_STATUS, MEM_PWR_ON_STATUS);
	if (ret)
		goto out;

	val |= MEM_PWR_OK;

	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
			      0, MEM_PWR_OK_STATUS, MEM_PWR_OK_STATUS);
	if (ret)
		goto out;

	val &= ~MEM_CLAMP_ON;

	ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
	if (ret)
		goto out;

	/* De-assert CPU reset */
	ctrl |= CPU_RESET_N(cpu);

	ret = bpcm_wr(base, addr, ARM_CONTROL, ctrl);
out:
	spin_unlock_irqrestore(&pmb_lock, flags);
	iounmap(base);
	return ret;
}
linux-master
arch/arm/mach-bcm/bcm63xx_pmb.c
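bpcm_wr_rd_mask() above captures a common bring-up idiom: write a value, then spin reading it back until a status field reaches the expected state. A self-contained model of that loop against a fake register (the real version goes through the PMB bus helpers bpcm_wr/bpcm_rd; the names here are illustrative):

#include <stdio.h>

static unsigned int fake_reg;
static int polls;

static void reg_write(unsigned int v) { fake_reg = v; }

static unsigned int reg_read(void)
{
	/* pretend the status bits catch up after a few reads */
	if (++polls >= 3)
		fake_reg |= 0xfu << 28;
	return fake_reg;
}

/* write 'val', then poll until (reg >> shift) & mask == cond */
static unsigned int wr_rd_mask(unsigned int val, unsigned int shift,
			       unsigned int mask, unsigned int cond)
{
	unsigned int v;

	reg_write(val);
	do {
		v = reg_read();
	} while (((v >> shift) & mask) != cond);
	return v;
}

int main(void)
{
	unsigned int v = wr_rd_mask(0x3u << 20, 28, 0xf, 0xf);

	printf("settled at %#x after %d polls\n", v, polls);
	return 0;
}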
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2012-2014 Broadcom Corporation

#include <linux/init.h>
#include <linux/printk.h>

#include <asm/hardware/cache-l2x0.h>

#include "bcm_kona_smc.h"
#include "kona_l2_cache.h"

void __init kona_l2_cache_init(void)
{
	unsigned int result;
	int ret;

	ret = bcm_kona_smc_init();
	if (ret) {
		pr_info("Secure API not available (%d). Skipping L2 init.\n",
			ret);
		return;
	}

	result = bcm_kona_smc(SSAPI_ENABLE_L2_CACHE, 0, 0, 0, 0);
	if (result != SEC_ROM_RET_OK) {
		pr_err("Secure Monitor call failed (%u)! Skipping L2 init.\n",
			result);
		return;
	}

	/*
	 * The aux_val and aux_mask have no effect since L2 cache is already
	 * enabled.  Pass 0s for aux_val and 1s for aux_mask for default value.
	 */
	ret = l2x0_of_init(0, ~0);
	if (ret)
		pr_err("Couldn't enable L2 cache: %d\n", ret);
}
linux-master
arch/arm/mach-bcm/kona_l2_cache.c
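As the comment in the file notes, the aux_val/aux_mask pair follows the usual L2C convention: the driver keeps the bits selected by the mask and ORs in the value, so (0, ~0) leaves the already-programmed auxiliary control register untouched. A one-liner demonstrating the identity (standalone sketch; the register value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x02050000;		/* whatever secure world set */
	unsigned int aux_val = 0, aux_mask = ~0u;

	/* the l2x0 core effectively computes: (reg & mask) | val */
	printf("result = %#x\n", (reg & aux_mask) | aux_val);
	return 0;	/* unchanged: 0x2050000 */
}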
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2010 Broadcom
 */

#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "platsmp.h"

static const char * const bcm2835_compat[] = {
#ifdef CONFIG_ARCH_MULTI_V6
	"brcm,bcm2835",
#endif
#ifdef CONFIG_ARCH_MULTI_V7
	"brcm,bcm2836",
	"brcm,bcm2837",
#endif
	NULL
};

DT_MACHINE_START(BCM2835, "BCM2835")
	.dt_compat = bcm2835_compat,
	.smp = smp_ops(bcm2836_smp_ops),
MACHINE_END
linux-master
arch/arm/mach-bcm/board_bcm2835.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM63138 DSL SoCs SMP support code
 *
 * Copyright (C) 2015, Broadcom Corporation
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include <asm/vfp.h>

#include "bcm63xx_smp.h"

/* Size of mapped Cortex A9 SCU address space */
#define CORTEX_A9_SCU_SIZE	0x58

/*
 * Enable the Cortex A9 Snoop Control Unit
 *
 * By the time this is called we already know there are multiple
 * cores present.  We assume we're running on a Cortex A9 processor,
 * so any trouble getting the base address register or getting the
 * SCU base is a problem.
 *
 * Return 0 if successful or an error code otherwise.
 */
static int __init scu_a9_enable(void)
{
	unsigned long config_base;
	void __iomem *scu_base;
	unsigned int i, ncores;

	if (!scu_a9_has_base()) {
		pr_err("no configuration base address register!\n");
		return -ENXIO;
	}

	/* Config base address register value is zero for uniprocessor */
	config_base = scu_a9_get_base();
	if (!config_base) {
		pr_err("hardware reports only one core\n");
		return -ENOENT;
	}

	scu_base = ioremap((phys_addr_t)config_base, CORTEX_A9_SCU_SIZE);
	if (!scu_base) {
		pr_err("failed to remap config base (%lu/%u) for SCU\n",
			config_base, CORTEX_A9_SCU_SIZE);
		return -ENOMEM;
	}

	scu_enable(scu_base);

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	/* The BCM63138 SoC has two Cortex-A9 CPUs: CPU0 features a complete
	 * and fully functional VFP unit that can be used, but CPU1 does not.
	 * Since we will not be able to trap kernel-mode NEON to force
	 * migration to CPU0, just do not advertise VFP support at all.
	 *
	 * This makes vfp_init() bail out and not attempt to use VFP at all.
	 * For kernel-mode NEON, we do not want to introduce any conditionals
	 * in hot paths, so we just restrict the system to UP.
	 */
#ifdef CONFIG_VFP
	if (ncores > 1) {
		pr_warn("SMP: secondary CPUs lack VFP unit, disabling VFP\n");
		vfp_disable();

#ifdef CONFIG_KERNEL_MODE_NEON
		WARN(1, "SMP: kernel-mode NEON enabled, restricting to UP\n");
		ncores = 1;
#endif
	}
#endif

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	iounmap(scu_base);	/* That's the last we'll need of this */

	return 0;
}

static const struct of_device_id bcm63138_bootlut_ids[] = {
	{ .compatible = "brcm,bcm63138-bootlut", },
	{ /* sentinel */ },
};

#define BOOTLUT_RESET_VECT	0x20

static int bcm63138_smp_boot_secondary(unsigned int cpu,
				       struct task_struct *idle)
{
	void __iomem *bootlut_base;
	struct device_node *dn;
	int ret = 0;
	u32 val;

	dn = of_find_matching_node(NULL, bcm63138_bootlut_ids);
	if (!dn) {
		pr_err("SMP: unable to find bcm63138 boot LUT node\n");
		return -ENODEV;
	}

	bootlut_base = of_iomap(dn, 0);
	of_node_put(dn);

	if (!bootlut_base) {
		pr_err("SMP: unable to remap boot LUT base register\n");
		return -ENOMEM;
	}

	/* Locate the secondary CPU node */
	dn = of_get_cpu_node(cpu, NULL);
	if (!dn) {
		pr_err("SMP: failed to locate secondary CPU%d node\n", cpu);
		ret = -ENODEV;
		goto out;
	}

	/* Write the secondary init routine to the BootLUT reset vector */
	val = __pa_symbol(secondary_startup);
	writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT);

	/* Power up the core, will jump straight to its reset vector when we
	 * return
	 */
	ret = bcm63xx_pmb_power_on_cpu(dn);
	of_node_put(dn);

out:
	iounmap(bootlut_base);

	return ret;
}

static void __init bcm63138_smp_prepare_cpus(unsigned int max_cpus)
{
	int ret;

	ret = scu_a9_enable();
	if (ret) {
		pr_warn("SMP: Cortex-A9 SCU setup failed\n");
		return;
	}
}

static const struct smp_operations bcm63138_smp_ops __initconst = {
	.smp_prepare_cpus	= bcm63138_smp_prepare_cpus,
	.smp_boot_secondary	= bcm63138_smp_boot_secondary,
};

CPU_METHOD_OF_DECLARE(bcm63138_smp, "brcm,bcm63138", &bcm63138_smp_ops);
linux-master
arch/arm/mach-bcm/bcm63xx_smp.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Stefan Wahren
 */

#include <linux/of_address.h>

#include <asm/mach/arch.h>

#include "platsmp.h"

static const char * const bcm2711_compat[] = {
#ifdef CONFIG_ARCH_MULTI_V7
	"brcm,bcm2711",
#endif
	NULL
};

DT_MACHINE_START(BCM2711, "BCM2711")
#ifdef CONFIG_ZONE_DMA
	.dma_zone_size	= SZ_1G,
#endif
	.dt_compat = bcm2711_compat,
	.smp = smp_ops(bcm2836_smp_ops),
MACHINE_END
linux-master
arch/arm/mach-bcm/bcm2711.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2014 Broadcom Corporation

#include <asm/mach/arch.h>

#include "kona_l2_cache.h"

static void __init bcm21664_init(void)
{
	kona_l2_cache_init();
}

static const char * const bcm21664_dt_compat[] = {
	"brcm,bcm21664",
	NULL,
};

DT_MACHINE_START(BCM21664_DT, "BCM21664 Broadcom Application Processor")
	.init_machine = bcm21664_init,
	.dt_compat = bcm21664_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-bcm/board_bcm21664.c
/*
 * Broadcom BCM470X / BCM5301X ARM platform code.
 *
 * Copyright 2013 Hauke Mehrtens <[email protected]>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */
#include <asm/mach/arch.h>
#include <asm/siginfo.h>
#include <asm/signal.h>

#define FSR_EXTERNAL		(1 << 12)
#define FSR_READ		(0 << 10)
#define FSR_IMPRECISE		0x0406

static const char *const bcm5301x_dt_compat[] __initconst = {
	"brcm,bcm4708",
	NULL,
};

static int bcm5301x_abort_handler(unsigned long addr, unsigned int fsr,
				  struct pt_regs *regs)
{
	/*
	 * We want to ignore aborts forwarded from the PCIe bus that are
	 * expected and shouldn't really be passed by the PCIe controller.
	 * The biggest disadvantage is that the same FSR code may be reported
	 * when reading a non-existing APB register, and we shouldn't ignore
	 * that.
	 */
	if (fsr == (FSR_EXTERNAL | FSR_READ | FSR_IMPRECISE))
		return 0;

	return 1;
}

static void __init bcm5301x_init_early(void)
{
	hook_fault_code(16 + 6, bcm5301x_abort_handler, SIGBUS, BUS_OBJERR,
			"imprecise external abort");
}

DT_MACHINE_START(BCM5301X, "BCM5301X")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.dt_compat	= bcm5301x_dt_compat,
	.init_early	= bcm5301x_init_early,
MACHINE_END
linux-master
arch/arm/mach-bcm/bcm_5301x.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2015 Broadcom Corporation

#include <asm/mach/arch.h>

static const char *const bcm_nsp_dt_compat[] __initconst = {
	"brcm,nsp",
	NULL,
};

DT_MACHINE_START(NSP_DT, "Broadcom Northstar Plus SoC")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.dt_compat = bcm_nsp_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-bcm/bcm_nsp.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2017 Broadcom

#include <asm/mach/arch.h>

static const char * const bcm_hr2_dt_compat[] __initconst = {
	"brcm,hr2",
	NULL,
};

DT_MACHINE_START(BCM_HR2_DT, "Broadcom Hurricane 2 SoC")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.dt_compat = bcm_hr2_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-bcm/bcm_hr2.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2016 Broadcom

#include <asm/mach/arch.h>

static const char * const bcm23550_dt_compat[] = {
	"brcm,bcm23550",
	NULL,
};

DT_MACHINE_START(BCM23550_DT, "BCM23550 Broadcom Application Processor")
	.dt_compat = bcm23550_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-bcm/board_bcm23550.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2014 Broadcom Corporation

#include <asm/mach/arch.h>

static const char * const bcm_cygnus_dt_compat[] __initconst = {
	"brcm,cygnus",
	NULL,
};

DT_MACHINE_START(BCM_CYGNUS_DT, "Broadcom Cygnus SoC")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.dt_compat = bcm_cygnus_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-bcm/bcm_cygnus.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2014-2015 Broadcom Corporation * Copyright 2014 Linaro Limited */ #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irqchip/irq-bcm2836.h> #include <linux/jiffies.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include <asm/smp.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> #include "platsmp.h" /* Size of mapped Cortex A9 SCU address space */ #define CORTEX_A9_SCU_SIZE 0x58 #define SECONDARY_TIMEOUT_NS NSEC_PER_MSEC /* 1 msec (in nanoseconds) */ #define BOOT_ADDR_CPUID_MASK 0x3 /* Name of device node property defining secondary boot register location */ #define OF_SECONDARY_BOOT "secondary-boot-reg" #define MPIDR_CPUID_BITMASK 0x3 /* * Enable the Cortex A9 Snoop Control Unit * * By the time this is called we already know there are multiple * cores present. We assume we're running on a Cortex A9 processor, * so any trouble getting the base address register or getting the * SCU base is a problem. * * Return 0 if successful or an error code otherwise. */ static int __init scu_a9_enable(void) { unsigned long config_base; void __iomem *scu_base; if (!scu_a9_has_base()) { pr_err("no configuration base address register!\n"); return -ENXIO; } /* Config base address register value is zero for uniprocessor */ config_base = scu_a9_get_base(); if (!config_base) { pr_err("hardware reports only one core\n"); return -ENOENT; } scu_base = ioremap((phys_addr_t)config_base, CORTEX_A9_SCU_SIZE); if (!scu_base) { pr_err("failed to remap config base (%lu/%u) for SCU\n", config_base, CORTEX_A9_SCU_SIZE); return -ENOMEM; } scu_enable(scu_base); iounmap(scu_base); /* That's the last we'll need of this */ return 0; } static u32 secondary_boot_addr_for(unsigned int cpu) { u32 secondary_boot_addr = 0; struct device_node *cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) { pr_err("Failed to find device tree node for CPU%u\n", cpu); return 0; } if (of_property_read_u32(cpu_node, OF_SECONDARY_BOOT, &secondary_boot_addr)) pr_err("required secondary boot register not specified for CPU%u\n", cpu); of_node_put(cpu_node); return secondary_boot_addr; } static int nsp_write_lut(unsigned int cpu) { void __iomem *sku_rom_lut; phys_addr_t secondary_startup_phy; const u32 secondary_boot_addr = secondary_boot_addr_for(cpu); if (!secondary_boot_addr) return -EINVAL; sku_rom_lut = ioremap((phys_addr_t)secondary_boot_addr, sizeof(phys_addr_t)); if (!sku_rom_lut) { pr_warn("unable to ioremap SKU-ROM LUT register for cpu %u\n", cpu); return -ENOMEM; } secondary_startup_phy = __pa_symbol(secondary_startup); BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX); writel_relaxed(secondary_startup_phy, sku_rom_lut); /* Ensure the write is visible to the secondary core */ smp_wmb(); iounmap(sku_rom_lut); return 0; } static void __init bcm_smp_prepare_cpus(unsigned int max_cpus) { const cpumask_t only_cpu_0 = { CPU_BITS_CPU0 }; /* Enable the SCU on Cortex A9 based SoCs */ if (scu_a9_enable()) { /* Update the CPU present map to reflect uniprocessor mode */ pr_warn("failed to enable A9 SCU - disabling SMP\n"); init_cpu_present(&only_cpu_0); } } /* * The ROM code has the secondary cores looping, waiting for an event. * When an event occurs each core examines the bottom two bits of the * secondary boot register. 
When a core finds those bits contain its * own core id, it performs initialization, including computing its boot * address by clearing the boot register value's bottom two bits. The * core signals that it is beginning its execution by writing its boot * address back to the secondary boot register, and finally jumps to * that address. * * So to start a core executing we need to: * - Encode the (hardware) CPU id with the bottom bits of the secondary * start address. * - Write that value into the secondary boot register. * - Generate an event to wake up the secondary CPU(s). * - Wait for the secondary boot register to be re-written, which * indicates the secondary core has started. */ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle) { void __iomem *boot_reg; phys_addr_t boot_func; u64 start_clock; u32 cpu_id; u32 boot_val; bool timeout = false; const u32 secondary_boot_addr = secondary_boot_addr_for(cpu); cpu_id = cpu_logical_map(cpu); if (cpu_id & ~BOOT_ADDR_CPUID_MASK) { pr_err("bad cpu id (%u > %u)\n", cpu_id, BOOT_ADDR_CPUID_MASK); return -EINVAL; } if (!secondary_boot_addr) return -EINVAL; boot_reg = ioremap((phys_addr_t)secondary_boot_addr, sizeof(phys_addr_t)); if (!boot_reg) { pr_err("unable to map boot register for cpu %u\n", cpu_id); return -ENOMEM; } /* * Secondary cores will start in secondary_startup(), * defined in "arch/arm/kernel/head.S" */ boot_func = __pa_symbol(secondary_startup); BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK); BUG_ON(boot_func > (phys_addr_t)U32_MAX); /* The core to start is encoded in the low bits */ boot_val = (u32)boot_func | cpu_id; writel_relaxed(boot_val, boot_reg); sev(); /* The low bits will be cleared once the core has started */ start_clock = local_clock(); while (!timeout && readl_relaxed(boot_reg) == boot_val) timeout = local_clock() - start_clock > SECONDARY_TIMEOUT_NS; iounmap(boot_reg); if (!timeout) return 0; pr_err("timeout waiting for cpu %u to start\n", cpu_id); return -ENXIO; } /* Cluster Dormant Control command to bring CPU into a running state */ #define CDC_CMD 6 #define CDC_CMD_OFFSET 0 #define CDC_CMD_REG(cpu) (CDC_CMD_OFFSET + 4*(cpu)) /* * BCM23550 has a Cluster Dormant Control block that keeps the core in * idle state. A command needs to be sent to the block to bring the CPU * into running state. */ static int bcm23550_boot_secondary(unsigned int cpu, struct task_struct *idle) { void __iomem *cdc_base; struct device_node *dn; char *name; int ret; /* Make sure a CDC node exists before booting the * secondary core. */ name = "brcm,bcm23550-cdc"; dn = of_find_compatible_node(NULL, NULL, name); if (!dn) { pr_err("unable to find cdc node\n"); return -ENODEV; } cdc_base = of_iomap(dn, 0); of_node_put(dn); if (!cdc_base) { pr_err("unable to remap cdc base register\n"); return -ENOMEM; } /* Boot the secondary core */ ret = kona_boot_secondary(cpu, idle); if (ret) goto out; /* Bring this CPU to RUN state so that nIRQ nFIQ * signals are unblocked. */ writel_relaxed(CDC_CMD, cdc_base + CDC_CMD_REG(cpu)); out: iounmap(cdc_base); return ret; } static int nsp_boot_secondary(unsigned int cpu, struct task_struct *idle) { int ret; /* * After wake up, secondary core branches to the startup * address programmed at SKU ROM LUT location. 
*/ ret = nsp_write_lut(cpu); if (ret) { pr_err("unable to write startup addr to SKU ROM LUT\n"); goto out; } /* Send a CPU wakeup interrupt to the secondary core */ arch_send_wakeup_ipi_mask(cpumask_of(cpu)); out: return ret; } static int bcm2836_boot_secondary(unsigned int cpu, struct task_struct *idle) { void __iomem *intc_base; struct device_node *dn; char *name; name = "brcm,bcm2836-l1-intc"; dn = of_find_compatible_node(NULL, NULL, name); if (!dn) { pr_err("unable to find intc node\n"); return -ENODEV; } intc_base = of_iomap(dn, 0); of_node_put(dn); if (!intc_base) { pr_err("unable to remap intc base register\n"); return -ENOMEM; } writel(virt_to_phys(secondary_startup), intc_base + LOCAL_MAILBOX3_SET0 + 16 * cpu); dsb(sy); sev(); iounmap(intc_base); return 0; } static const struct smp_operations kona_smp_ops __initconst = { .smp_prepare_cpus = bcm_smp_prepare_cpus, .smp_boot_secondary = kona_boot_secondary, }; CPU_METHOD_OF_DECLARE(bcm_smp_bcm281xx, "brcm,bcm11351-cpu-method", &kona_smp_ops); static const struct smp_operations bcm23550_smp_ops __initconst = { .smp_boot_secondary = bcm23550_boot_secondary, }; CPU_METHOD_OF_DECLARE(bcm_smp_bcm23550, "brcm,bcm23550", &bcm23550_smp_ops); static const struct smp_operations nsp_smp_ops __initconst = { .smp_prepare_cpus = bcm_smp_prepare_cpus, .smp_boot_secondary = nsp_boot_secondary, }; CPU_METHOD_OF_DECLARE(bcm_smp_nsp, "brcm,bcm-nsp-smp", &nsp_smp_ops); const struct smp_operations bcm2836_smp_ops __initconst = { .smp_boot_secondary = bcm2836_boot_secondary, }; CPU_METHOD_OF_DECLARE(bcm_smp_bcm2836, "brcm,bcm2836-smp", &bcm2836_smp_ops);
linux-master
arch/arm/mach-bcm/platsmp.c
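/*
 * Illustrative sketch, stand-alone C rather than kernel code: the Kona
 * protocol above can multiplex the CPU id into the low bits of the boot
 * address only because the entry point is word-aligned, so its two low
 * bits are known to be zero. The addresses below are made-up values.
 */
#include <assert.h>
#include <stdio.h>

#define BOOT_ADDR_CPUID_MASK 0x3u

int main(void)
{
	unsigned boot_func = 0x80008000u;	/* hypothetical entry point */
	unsigned cpu_id = 1;

	assert((boot_func & BOOT_ADDR_CPUID_MASK) == 0);

	/* What the kernel writes to the secondary boot register ... */
	unsigned boot_val = boot_func | cpu_id;

	/* ... and what the spinning ROM core recovers after matching its id */
	unsigned jump_addr = boot_val & ~BOOT_ADDR_CPUID_MASK;

	printf("wrote %#x; core %u jumps to %#x\n", boot_val, cpu_id, jump_addr);
	return 0;
}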
// SPDX-License-Identifier: GPL-2.0-only /* * Broadcom STB CPU SMP and hotplug support for ARM * * Copyright (C) 2013-2014 Broadcom Corporation */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/printk.h> #include <linux/regmap.h> #include <linux/smp.h> #include <linux/mfd/syscon.h> #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/mach-types.h> #include <asm/smp_plat.h> enum { ZONE_MAN_CLKEN_MASK = BIT(0), ZONE_MAN_RESET_CNTL_MASK = BIT(1), ZONE_MAN_MEM_PWR_MASK = BIT(4), ZONE_RESERVED_1_MASK = BIT(5), ZONE_MAN_ISO_CNTL_MASK = BIT(6), ZONE_MANUAL_CONTROL_MASK = BIT(7), ZONE_PWR_DN_REQ_MASK = BIT(9), ZONE_PWR_UP_REQ_MASK = BIT(10), ZONE_BLK_RST_ASSERT_MASK = BIT(12), ZONE_PWR_OFF_STATE_MASK = BIT(25), ZONE_PWR_ON_STATE_MASK = BIT(26), ZONE_DPG_PWR_STATE_MASK = BIT(28), ZONE_MEM_PWR_STATE_MASK = BIT(29), ZONE_RESET_STATE_MASK = BIT(31), CPU0_PWR_ZONE_CTRL_REG = 1, CPU_RESET_CONFIG_REG = 2, }; static void __iomem *cpubiuctrl_block; static void __iomem *hif_cont_block; static u32 cpu0_pwr_zone_ctrl_reg; static u32 cpu_rst_cfg_reg; static u32 hif_cont_reg; #ifdef CONFIG_HOTPLUG_CPU /* * We must quiesce a dying CPU before it can be killed by the boot CPU. Because * one or more cache may be disabled, we must flush to ensure coherency. We * cannot use traditional completion structures or spinlocks as they rely on * coherency. */ static DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state); static int per_cpu_sw_state_rd(u32 cpu) { sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); return per_cpu(per_cpu_sw_state, cpu); } static void per_cpu_sw_state_wr(u32 cpu, int val) { dmb(); per_cpu(per_cpu_sw_state, cpu) = val; sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); } #else static inline void per_cpu_sw_state_wr(u32 cpu, int val) { } #endif static void __iomem *pwr_ctrl_get_base(u32 cpu) { void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg; base += (cpu_logical_map(cpu) * 4); return base; } static u32 pwr_ctrl_rd(u32 cpu) { void __iomem *base = pwr_ctrl_get_base(cpu); return readl_relaxed(base); } static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask) { void __iomem *base = pwr_ctrl_get_base(cpu); writel((readl(base) & mask) | val, base); } static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask) { void __iomem *base = pwr_ctrl_get_base(cpu); writel((readl(base) & mask) & ~val, base); } #define POLL_TMOUT_MS 500 static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask) { const unsigned long timeo = jiffies + msecs_to_jiffies(POLL_TMOUT_MS); u32 tmp; do { tmp = pwr_ctrl_rd(cpu) & mask; if (!set == !tmp) return 0; } while (time_before(jiffies, timeo)); tmp = pwr_ctrl_rd(cpu) & mask; if (!set == !tmp) return 0; return -ETIMEDOUT; } static void cpu_rst_cfg_set(u32 cpu, int set) { u32 val; val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg); if (set) val |= BIT(cpu_logical_map(cpu)); else val &= ~BIT(cpu_logical_map(cpu)); writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg); } static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr) { const int reg_ofs = cpu_logical_map(cpu) * 8; writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs); writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs); } static void brcmstb_cpu_boot(u32 cpu) { /* Mark this CPU as "up" */ per_cpu_sw_state_wr(cpu, 1); /* * Set the reset vector to point to the secondary_startup 
* routine */ cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup)); /* Unhalt the cpu */ cpu_rst_cfg_set(cpu, 0); } static void brcmstb_cpu_power_on(u32 cpu) { /* * The secondary cores power was cut, so we must go through * power-on initialization. */ pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00); pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1); pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1); pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1); if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK)) panic("ZONE_MEM_PWR_STATE_MASK set timeout"); pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1); if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK)) panic("ZONE_DPG_PWR_STATE_MASK set timeout"); pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1); pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1); } static int brcmstb_cpu_get_power_state(u32 cpu) { int tmp = pwr_ctrl_rd(cpu); return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1; } #ifdef CONFIG_HOTPLUG_CPU static void brcmstb_cpu_die(u32 cpu) { v7_exit_coherency_flush(all); per_cpu_sw_state_wr(cpu, 0); /* Sit and wait to die */ wfi(); /* We should never get here... */ while (1) ; } static int brcmstb_cpu_kill(u32 cpu) { /* * Ordinarily, the hardware forbids power-down of CPU0 (which is good * because it is the boot CPU), but this is not true when using BPCM * manual mode. Consequently, we must avoid turning off CPU0 here to * ensure that TI2C master reset will work. */ if (cpu == 0) { pr_warn("SMP: refusing to power off CPU0\n"); return 1; } while (per_cpu_sw_state_rd(cpu)) ; pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1); pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1); pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1); pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1); pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1); if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK)) panic("ZONE_MEM_PWR_STATE_MASK clear timeout"); pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1); if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK)) panic("ZONE_DPG_PWR_STATE_MASK clear timeout"); /* Flush pipeline before resetting CPU */ mb(); /* Assert reset on the CPU */ cpu_rst_cfg_set(cpu, 1); return 1; } #endif /* CONFIG_HOTPLUG_CPU */ static int __init setup_hifcpubiuctrl_regs(struct device_node *np) { int rc = 0; char *name; struct device_node *syscon_np = NULL; name = "syscon-cpu"; syscon_np = of_parse_phandle(np, name, 0); if (!syscon_np) { pr_err("can't find phandle %s\n", name); rc = -EINVAL; goto cleanup; } cpubiuctrl_block = of_iomap(syscon_np, 0); if (!cpubiuctrl_block) { pr_err("iomap failed for cpubiuctrl_block\n"); rc = -EINVAL; goto cleanup; } rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG, &cpu0_pwr_zone_ctrl_reg); if (rc) { pr_err("failed to read 1st entry from %s property (%d)\n", name, rc); rc = -EINVAL; goto cleanup; } rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG, &cpu_rst_cfg_reg); if (rc) { pr_err("failed to read 2nd entry from %s property (%d)\n", name, rc); rc = -EINVAL; goto cleanup; } cleanup: of_node_put(syscon_np); return rc; } static int __init setup_hifcont_regs(struct device_node *np) { int rc = 0; char *name; struct device_node *syscon_np = NULL; name = "syscon-cont"; syscon_np = of_parse_phandle(np, name, 0); if (!syscon_np) { pr_err("can't find phandle %s\n", name); rc = -EINVAL; goto cleanup; } hif_cont_block = of_iomap(syscon_np, 0); if (!hif_cont_block) { pr_err("iomap failed for hif_cont_block\n"); rc = -EINVAL; goto cleanup; } /* Offset is at top of hif_cont_block */ hif_cont_reg = 0; cleanup: of_node_put(syscon_np); 
return rc; } static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus) { int rc; struct device_node *np; char *name; name = "brcm,brcmstb-smpboot"; np = of_find_compatible_node(NULL, NULL, name); if (!np) { pr_err("can't find compatible node %s\n", name); return; } rc = setup_hifcpubiuctrl_regs(np); if (rc) goto out_put_node; rc = setup_hifcont_regs(np); if (rc) goto out_put_node; out_put_node: of_node_put(np); } static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle) { /* Missing the brcm,brcmstb-smpboot DT node? */ if (!cpubiuctrl_block || !hif_cont_block) return -ENODEV; /* Bring up power to the core if necessary */ if (brcmstb_cpu_get_power_state(cpu) == 0) brcmstb_cpu_power_on(cpu); brcmstb_cpu_boot(cpu); return 0; } static const struct smp_operations brcmstb_smp_ops __initconst = { .smp_prepare_cpus = brcmstb_cpu_ctrl_setup, .smp_boot_secondary = brcmstb_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_kill = brcmstb_cpu_kill, .cpu_die = brcmstb_cpu_die, #endif }; CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops);
linux-master
arch/arm/mach-bcm/platsmp-brcmstb.c
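/*
 * Illustrative sketch, not from the tree above: pwr_ctrl_wait_tmout()
 * polls a status bit until it reaches the requested level, then samples
 * one final time after the deadline so a transition that lands exactly
 * at timeout is not misreported. A hedged generic form of that idiom,
 * with the register read abstracted behind a callback:
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int poll_bits(u32 (*rd)(void), u32 mask, bool set,
		     unsigned int timeout_ms)
{
	const unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	u32 val;

	do {
		val = rd() & mask;
		if (!set == !val)	/* compare as booleans, not raw values */
			return 0;
	} while (time_before(jiffies, deadline));

	/* Last-chance sample after the deadline expired */
	val = rd() & mask;
	return (!set == !val) ? 0 : -ETIMEDOUT;
}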
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Carlo Caione <[email protected]>
 */

#include <asm/mach/arch.h>

static const char * const meson_common_board_compat[] = {
	"amlogic,meson6",
	"amlogic,meson8",
	"amlogic,meson8b",
	"amlogic,meson8m2",
	NULL,
};

DT_MACHINE_START(MESON, "Amlogic Meson platform")
	.dt_compat	= meson_common_board_compat,
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
MACHINE_END
linux-master
arch/arm/mach-meson/meson.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015 Carlo Caione <[email protected]> * Copyright (C) 2017 Martin Blumenstingl <[email protected]> */ #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/smp.h> #include <linux/mfd/syscon.h> #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/smp_scu.h> #include <asm/smp_plat.h> #define MESON_SMP_SRAM_CPU_CTRL_REG (0x00) #define MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(c) (0x04 + ((c - 1) << 2)) #define MESON_CPU_AO_RTI_PWR_A9_CNTL0 (0x00) #define MESON_CPU_AO_RTI_PWR_A9_CNTL1 (0x04) #define MESON_CPU_AO_RTI_PWR_A9_MEM_PD0 (0x14) #define MESON_CPU_PWR_A9_CNTL0_M(c) (0x03 << ((c * 2) + 16)) #define MESON_CPU_PWR_A9_CNTL1_M(c) (0x03 << ((c + 1) << 1)) #define MESON_CPU_PWR_A9_MEM_PD0_M(c) (0x0f << (32 - (c * 4))) #define MESON_CPU_PWR_A9_CNTL1_ST(c) (0x01 << (c + 16)) static void __iomem *sram_base; static void __iomem *scu_base; static struct regmap *pmu; static struct reset_control *meson_smp_get_core_reset(int cpu) { struct device_node *np = of_get_cpu_node(cpu, 0); return of_reset_control_get_exclusive(np, NULL); } static void meson_smp_set_cpu_ctrl(int cpu, bool on_off) { u32 val = readl(sram_base + MESON_SMP_SRAM_CPU_CTRL_REG); if (on_off) val |= BIT(cpu); else val &= ~BIT(cpu); /* keep bit 0 always enabled */ val |= BIT(0); writel(val, sram_base + MESON_SMP_SRAM_CPU_CTRL_REG); } static void __init meson_smp_prepare_cpus(const char *scu_compatible, const char *pmu_compatible, const char *sram_compatible) { static struct device_node *node; /* SMP SRAM */ node = of_find_compatible_node(NULL, NULL, sram_compatible); if (!node) { pr_err("Missing SRAM node\n"); return; } sram_base = of_iomap(node, 0); of_node_put(node); if (!sram_base) { pr_err("Couldn't map SRAM registers\n"); return; } /* PMU */ pmu = syscon_regmap_lookup_by_compatible(pmu_compatible); if (IS_ERR(pmu)) { pr_err("Couldn't map PMU registers\n"); return; } /* SCU */ node = of_find_compatible_node(NULL, NULL, scu_compatible); if (!node) { pr_err("Missing SCU node\n"); return; } scu_base = of_iomap(node, 0); of_node_put(node); if (!scu_base) { pr_err("Couldn't map SCU registers\n"); return; } scu_enable(scu_base); } static void __init meson8b_smp_prepare_cpus(unsigned int max_cpus) { meson_smp_prepare_cpus("arm,cortex-a5-scu", "amlogic,meson8b-pmu", "amlogic,meson8b-smp-sram"); } static void __init meson8_smp_prepare_cpus(unsigned int max_cpus) { meson_smp_prepare_cpus("arm,cortex-a9-scu", "amlogic,meson8-pmu", "amlogic,meson8-smp-sram"); } static void meson_smp_begin_secondary_boot(unsigned int cpu) { /* * Set the entry point before powering on the CPU through the SCU. This * is needed if the CPU is in "warm" state (= after rebooting the * system without power-cycling, or when taking the CPU offline and * then taking it online again. */ writel(__pa_symbol(secondary_startup), sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu)); /* * SCU Power on CPU (needs to be done before starting the CPU, * otherwise the secondary CPU will not start). 
*/ scu_cpu_power_enable(scu_base, cpu); } static int meson_smp_finalize_secondary_boot(unsigned int cpu) { unsigned long timeout; timeout = jiffies + (10 * HZ); while (readl(sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu))) { if (!time_before(jiffies, timeout)) { pr_err("Timeout while waiting for CPU%d status\n", cpu); return -ETIMEDOUT; } } writel(__pa_symbol(secondary_startup), sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu)); meson_smp_set_cpu_ctrl(cpu, true); return 0; } static int meson8_smp_boot_secondary(unsigned int cpu, struct task_struct *idle) { struct reset_control *rstc; int ret; rstc = meson_smp_get_core_reset(cpu); if (IS_ERR(rstc)) { pr_err("Couldn't get the reset controller for CPU%d\n", cpu); return PTR_ERR(rstc); } meson_smp_begin_secondary_boot(cpu); /* Reset enable */ ret = reset_control_assert(rstc); if (ret) { pr_err("Failed to assert CPU%d reset\n", cpu); goto out; } /* CPU power ON */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1, MESON_CPU_PWR_A9_CNTL1_M(cpu), 0); if (ret < 0) { pr_err("Couldn't wake up CPU%d\n", cpu); goto out; } udelay(10); /* Isolation disable */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu), 0); if (ret < 0) { pr_err("Error when disabling isolation of CPU%d\n", cpu); goto out; } /* Reset disable */ ret = reset_control_deassert(rstc); if (ret) { pr_err("Failed to de-assert CPU%d reset\n", cpu); goto out; } ret = meson_smp_finalize_secondary_boot(cpu); if (ret) goto out; out: reset_control_put(rstc); return 0; } static int meson8b_smp_boot_secondary(unsigned int cpu, struct task_struct *idle) { struct reset_control *rstc; int ret; u32 val; rstc = meson_smp_get_core_reset(cpu); if (IS_ERR(rstc)) { pr_err("Couldn't get the reset controller for CPU%d\n", cpu); return PTR_ERR(rstc); } meson_smp_begin_secondary_boot(cpu); /* CPU power UP */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, MESON_CPU_PWR_A9_CNTL0_M(cpu), 0); if (ret < 0) { pr_err("Couldn't power up CPU%d\n", cpu); goto out; } udelay(5); /* Reset enable */ ret = reset_control_assert(rstc); if (ret) { pr_err("Failed to assert CPU%d reset\n", cpu); goto out; } /* Memory power UP */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_MEM_PD0, MESON_CPU_PWR_A9_MEM_PD0_M(cpu), 0); if (ret < 0) { pr_err("Couldn't power up the memory for CPU%d\n", cpu); goto out; } /* Wake up CPU */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1, MESON_CPU_PWR_A9_CNTL1_M(cpu), 0); if (ret < 0) { pr_err("Couldn't wake up CPU%d\n", cpu); goto out; } udelay(10); ret = regmap_read_poll_timeout(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1, val, val & MESON_CPU_PWR_A9_CNTL1_ST(cpu), 10, 10000); if (ret) { pr_err("Timeout while polling PMU for CPU%d status\n", cpu); goto out; } /* Isolation disable */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu), 0); if (ret < 0) { pr_err("Error when disabling isolation of CPU%d\n", cpu); goto out; } /* Reset disable */ ret = reset_control_deassert(rstc); if (ret) { pr_err("Failed to de-assert CPU%d reset\n", cpu); goto out; } ret = meson_smp_finalize_secondary_boot(cpu); if (ret) goto out; out: reset_control_put(rstc); return 0; } #ifdef CONFIG_HOTPLUG_CPU static void meson8_smp_cpu_die(unsigned int cpu) { meson_smp_set_cpu_ctrl(cpu, false); v7_exit_coherency_flush(louis); scu_power_mode(scu_base, SCU_PM_POWEROFF); dsb(); wfi(); /* we should never get here */ WARN_ON(1); } static int meson8_smp_cpu_kill(unsigned int cpu) { int ret, power_mode; unsigned long timeout; timeout = jiffies + (50 * HZ); do 
{ power_mode = scu_get_cpu_power_mode(scu_base, cpu); if (power_mode == SCU_PM_POWEROFF) break; usleep_range(10000, 15000); } while (time_before(jiffies, timeout)); if (power_mode != SCU_PM_POWEROFF) { pr_err("Error while waiting for SCU power-off on CPU%d\n", cpu); return -ETIMEDOUT; } msleep(30); /* Isolation enable */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu), 0x3); if (ret < 0) { pr_err("Error when enabling isolation for CPU%d\n", cpu); return ret; } udelay(10); /* CPU power OFF */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1, MESON_CPU_PWR_A9_CNTL1_M(cpu), 0x3); if (ret < 0) { pr_err("Couldn't change sleep status of CPU%d\n", cpu); return ret; } return 1; } static int meson8b_smp_cpu_kill(unsigned int cpu) { int ret, power_mode, count = 5000; do { power_mode = scu_get_cpu_power_mode(scu_base, cpu); if (power_mode == SCU_PM_POWEROFF) break; udelay(10); } while (++count); if (power_mode != SCU_PM_POWEROFF) { pr_err("Error while waiting for SCU power-off on CPU%d\n", cpu); return -ETIMEDOUT; } udelay(10); /* CPU power DOWN */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, MESON_CPU_PWR_A9_CNTL0_M(cpu), 0x3); if (ret < 0) { pr_err("Couldn't power down CPU%d\n", cpu); return ret; } /* Isolation enable */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu), 0x3); if (ret < 0) { pr_err("Error when enabling isolation for CPU%d\n", cpu); return ret; } udelay(10); /* Sleep status */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1, MESON_CPU_PWR_A9_CNTL1_M(cpu), 0x3); if (ret < 0) { pr_err("Couldn't change sleep status of CPU%d\n", cpu); return ret; } /* Memory power DOWN */ ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_MEM_PD0, MESON_CPU_PWR_A9_MEM_PD0_M(cpu), 0xf); if (ret < 0) { pr_err("Couldn't power down the memory of CPU%d\n", cpu); return ret; } return 1; } #endif static struct smp_operations meson8_smp_ops __initdata = { .smp_prepare_cpus = meson8_smp_prepare_cpus, .smp_boot_secondary = meson8_smp_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = meson8_smp_cpu_die, .cpu_kill = meson8_smp_cpu_kill, #endif }; static struct smp_operations meson8b_smp_ops __initdata = { .smp_prepare_cpus = meson8b_smp_prepare_cpus, .smp_boot_secondary = meson8b_smp_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = meson8_smp_cpu_die, .cpu_kill = meson8b_smp_cpu_kill, #endif }; CPU_METHOD_OF_DECLARE(meson8_smp, "amlogic,meson8-smp", &meson8_smp_ops); CPU_METHOD_OF_DECLARE(meson8b_smp, "amlogic,meson8b-smp", &meson8b_smp_ops);
linux-master
arch/arm/mach-meson/platsmp.c
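/*
 * Illustrative sketch, not from the tree above: the read-modify-write in
 * meson_smp_set_cpu_ctrl() is easiest to audit as a pure function. Bit 0
 * (the boot CPU's enable) is forced back on regardless of which core is
 * being changed, e.g. cpu_ctrl_next(0x3, 1, false) == 0x1, never 0x0.
 */
#include <linux/bits.h>
#include <linux/types.h>

static u32 cpu_ctrl_next(u32 old, unsigned int cpu, bool on)
{
	u32 val = on ? (old | BIT(cpu)) : (old & ~BIT(cpu));

	return val | BIT(0);	/* CPU0 must stay enabled */
}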
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Author: Alexander Shiyan <[email protected]>, 2016
 */

#include <linux/io.h>
#include <linux/of_fdt.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/sizes.h>

#include <linux/mfd/syscon/clps711x.h>

#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#define CLPS711X_VIRT_BASE	IOMEM(0xfeff4000)
#define CLPS711X_PHYS_BASE	(0x80000000)
# define SYSFLG1		(0x0140)
# define HALT			(0x0800)
# define UNIQID			(0x2440)
# define RANDID0		(0x2700)
# define RANDID1		(0x2704)
# define RANDID2		(0x2708)
# define RANDID3		(0x270c)

static struct map_desc clps711x_io_desc __initdata = {
	.virtual	= (unsigned long)CLPS711X_VIRT_BASE,
	.pfn		= __phys_to_pfn(CLPS711X_PHYS_BASE),
	.length		= 48 * SZ_1K,
	.type		= MT_DEVICE,
};

static void __init clps711x_map_io(void)
{
	iotable_init(&clps711x_io_desc, 1);
}

static const struct resource clps711x_cpuidle_res =
	DEFINE_RES_MEM(CLPS711X_PHYS_BASE + HALT, SZ_128);

static void __init clps711x_init(void)
{
	u32 id[5];

	id[0] = readl(CLPS711X_VIRT_BASE + UNIQID);
	id[1] = readl(CLPS711X_VIRT_BASE + RANDID0);
	id[2] = readl(CLPS711X_VIRT_BASE + RANDID1);
	id[3] = readl(CLPS711X_VIRT_BASE + RANDID2);
	id[4] = readl(CLPS711X_VIRT_BASE + RANDID3);
	system_rev = SYSFLG1_VERID(readl(CLPS711X_VIRT_BASE + SYSFLG1));

	add_device_randomness(id, sizeof(id));

	system_serial_low = id[0];

	platform_device_register_simple("clps711x-cpuidle", PLATFORM_DEVID_NONE,
					&clps711x_cpuidle_res, 1);
}

static void clps711x_restart(enum reboot_mode mode, const char *cmd)
{
	soft_restart(0);
}

static const char *const clps711x_compat[] __initconst = {
	"cirrus,ep7209",
	NULL
};

DT_MACHINE_START(CLPS711X_DT, "Cirrus Logic CLPS711X (Device Tree Support)")
	.dt_compat	= clps711x_compat,
	.map_io		= clps711x_map_io,
	.init_late	= clps711x_init,
	.restart	= clps711x_restart,
MACHINE_END
linux-master
arch/arm/mach-clps711x/board-dt.c
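/*
 * Illustrative sketch, not from the tree above: clps711x_init() seeds
 * the entropy pool from device-unique ID registers. The same pattern
 * generalizes to any per-device register bank; ID_BASE_OFF and NWORDS
 * are hypothetical, and unlike the CLPS711X layout the registers are
 * assumed contiguous here.
 */
#include <linux/io.h>
#include <linux/random.h>

#define ID_BASE_OFF	0x2440	/* hypothetical first ID register */
#define NWORDS		5

static void seed_from_id_regs(void __iomem *base)
{
	u32 id[NWORDS];
	int i;

	for (i = 0; i < NWORDS; i++)
		id[i] = readl(base + ID_BASE_OFF + 4 * i);

	/* Mixed in without entropy credit; it still de-correlates devices */
	add_device_randomness(id, sizeof(id));
}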
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics 2017
 * Author:  Maxime Coquelin <[email protected]>
 */

#include <linux/kernel.h>
#include <asm/mach/arch.h>
#ifdef CONFIG_ARM_SINGLE_ARMV7M
#include <asm/v7m.h>
#endif

static const char *const stm32_compat[] __initconst = {
	"st,stm32f429",
	"st,stm32f469",
	"st,stm32f746",
	"st,stm32f769",
	"st,stm32h743",
	"st,stm32h750",
	"st,stm32mp131",
	"st,stm32mp133",
	"st,stm32mp135",
	"st,stm32mp151",
	"st,stm32mp157",
	NULL
};

DT_MACHINE_START(STM32DT, "STM32 (Device Tree Support)")
	.dt_compat = stm32_compat,
#ifdef CONFIG_ARM_SINGLE_ARMV7M
	.restart = armv7m_restart,
#endif
MACHINE_END
linux-master
arch/arm/mach-stm32/board-dt.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device Tree support for Airoha SoCs
 *
 * Copyright (c) 2022 Felix Fietkau <[email protected]>
 */
#include <asm/mach/arch.h>

static const char * const airoha_board_dt_compat[] = {
	"airoha,en7523",
	NULL,
};

DT_MACHINE_START(MEDIATEK_DT, "Airoha Cortex-A53 (Device Tree)")
	.dt_compat	= airoha_board_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-airoha/airoha.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Nuvoton Technology corporation.
// Copyright 2018 Google, Inc.

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/hardware/cache-l2x0.h>

static const char *const npcm7xx_dt_match[] = {
	"nuvoton,npcm750",
	NULL
};

DT_MACHINE_START(NPCM7XX_DT, "NPCM7XX Chip family")
	.atag_offset	= 0x100,
	.dt_compat	= npcm7xx_dt_match,
	.l2c_aux_val	= 0x0,
	.l2c_aux_mask	= ~0x0,
MACHINE_END
linux-master
arch/arm/mach-npcm/npcm7xx.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Nuvoton Technology corporation.
// Copyright 2018 Google, Inc.

#define pr_fmt(fmt) "nuvoton,npcm7xx-smp: " fmt

#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>

#define NPCM7XX_SCRPAD_REG	0x13c

extern void npcm7xx_secondary_startup(void);

static int npcm7xx_smp_boot_secondary(unsigned int cpu,
				      struct task_struct *idle)
{
	struct device_node *gcr_np;
	void __iomem *gcr_base;
	int ret = 0;

	gcr_np = of_find_compatible_node(NULL, NULL, "nuvoton,npcm750-gcr");
	if (!gcr_np) {
		pr_err("no gcr device node\n");
		ret = -ENODEV;
		goto out;
	}
	gcr_base = of_iomap(gcr_np, 0);
	if (!gcr_base) {
		pr_err("could not iomap gcr\n");
		ret = -ENOMEM;
		goto out;
	}

	/* Give the boot ROM the kernel start address. */
	iowrite32(__pa_symbol(npcm7xx_secondary_startup),
		  gcr_base + NPCM7XX_SCRPAD_REG);
	/* Make sure the previous write is seen by all observers. */
	dsb_sev();

	iounmap(gcr_base);
out:
	return ret;
}

static void __init npcm7xx_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *scu_np;
	void __iomem *scu_base;

	scu_np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (!scu_np) {
		pr_err("no scu device node\n");
		return;
	}
	scu_base = of_iomap(scu_np, 0);
	if (!scu_base) {
		pr_err("could not iomap scu\n");
		return;
	}

	scu_enable(scu_base);
	iounmap(scu_base);
}

static struct smp_operations npcm7xx_smp_ops __initdata = {
	.smp_prepare_cpus = npcm7xx_smp_prepare_cpus,
	.smp_boot_secondary = npcm7xx_smp_boot_secondary,
};

CPU_METHOD_OF_DECLARE(npcm7xx_smp, "nuvoton,npcm750-smp", &npcm7xx_smp_ops);
linux-master
arch/arm/mach-npcm/platsmp.c
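/*
 * Illustrative sketch, not from the tree above: the NPCM7xx release
 * sequence is the classic "mailbox plus event" pattern, since the boot
 * ROM parks secondaries in a WFE loop. Meant to live in a platsmp-style
 * file like the one above; the entry symbol and scratchpad offset below
 * are hypothetical stand-ins.
 */
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>

#define SCRPAD_OFF	0x13c	/* hypothetical scratchpad offset */

extern void my_secondary_startup(void);	/* hypothetical entry stub */

static void release_parked_cpu(void __iomem *gcr_base)
{
	/* 1) Publish the physical entry point in the agreed-on mailbox */
	iowrite32(__pa_symbol(my_secondary_startup),
		  gcr_base + SCRPAD_OFF);

	/* 2) Drain the store, then send the event that ends the WFE loop */
	dsb_sev();
}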
// SPDX-License-Identifier: GPL-2.0
// Copyright 2021 Jonathan Neuschäfer

#include <asm/mach/arch.h>

static const char *const wpcm450_dt_match[] = {
	"nuvoton,wpcm450",
	NULL
};

DT_MACHINE_START(WPCM450_DT, "WPCM450 chip")
	.dt_compat = wpcm450_dt_match,
MACHINE_END
linux-master
arch/arm/mach-npcm/wpcm450.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Daniel Tang <[email protected]>
 */

#include <asm/mach/arch.h>

static const char *const nspire_dt_match[] __initconst = {
	"ti,nspire",
	"ti,nspire-cx",
	"ti,nspire-tp",
	"ti,nspire-clp",
	NULL,
};

DT_MACHINE_START(NSPIRE, "TI-NSPIRE")
	.dt_compat	= nspire_dt_match,
MACHINE_END
linux-master
arch/arm/mach-nspire/nspire.c
// SPDX-License-Identifier: GPL-2.0 // // Copyright (c) 2010-2014 Samsung Electronics Co., Ltd. // http://www.samsung.com // // S5PV210 - Power Management support // // Based on arch/arm/mach-s3c2410/pm.c // Copyright (c) 2006 Simtec Electronics // Ben Dooks <[email protected]> #include <linux/init.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/io.h> #include <linux/soc/samsung/s3c-pm.h> #include <asm/cacheflush.h> #include <asm/suspend.h> #include "common.h" #include "regs-clock.h" /* helper functions to save and restore register state */ struct sleep_save { void __iomem *reg; unsigned long val; }; #define SAVE_ITEM(x) \ { .reg = (x) } /** * s3c_pm_do_save() - save a set of registers for restoration on resume. * @ptr: Pointer to an array of registers. * @count: Size of the ptr array. * * Run through the list of registers given, saving their contents in the * array for later restoration when we wakeup. */ static void s3c_pm_do_save(struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) { ptr->val = readl_relaxed(ptr->reg); S3C_PMDBG("saved %p value %08lx\n", ptr->reg, ptr->val); } } /** * s3c_pm_do_restore() - restore register values from the save list. * @ptr: Pointer to an array of registers. * @count: Size of the ptr array. * * Restore the register values saved from s3c_pm_do_save(). * * WARNING: Do not put any debug in here that may effect memory or use * peripherals, as things may be changing! */ static void s3c_pm_do_restore_core(const struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) writel_relaxed(ptr->val, ptr->reg); } static struct sleep_save s5pv210_core_save[] = { /* Clock ETC */ SAVE_ITEM(S5P_MDNIE_SEL), }; /* * VIC wake-up support (TODO) */ static u32 s5pv210_irqwake_intmask = 0xffffffff; static u32 s5pv210_read_eint_wakeup_mask(void) { return __raw_readl(S5P_EINT_WAKEUP_MASK); } /* * Suspend helpers. */ static int s5pv210_cpu_suspend(unsigned long arg) { unsigned long tmp; /* issue the standby signal into the pm unit. Note, we * issue a write-buffer drain just in case */ tmp = 0; asm("b 1f\n\t" ".align 5\n\t" "1:\n\t" "mcr p15, 0, %0, c7, c10, 5\n\t" "mcr p15, 0, %0, c7, c10, 4\n\t" "wfi" : : "r" (tmp)); pr_info("Failed to suspend the system\n"); return 1; /* Aborting suspend */ } static void s5pv210_pm_prepare(void) { unsigned int tmp; /* * Set wake-up mask registers * S5P_EINT_WAKEUP_MASK is set by pinctrl driver in late suspend. */ __raw_writel(s5pv210_irqwake_intmask, S5P_WAKEUP_MASK); /* ensure at least INFORM0 has the resume address */ __raw_writel(__pa_symbol(s5pv210_cpu_resume), S5P_INFORM0); tmp = __raw_readl(S5P_SLEEP_CFG); tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN); __raw_writel(tmp, S5P_SLEEP_CFG); /* WFI for SLEEP mode configuration by SYSCON */ tmp = __raw_readl(S5P_PWR_CFG); tmp &= S5P_CFG_WFI_CLEAN; tmp |= S5P_CFG_WFI_SLEEP; __raw_writel(tmp, S5P_PWR_CFG); /* SYSCON interrupt handling disable */ tmp = __raw_readl(S5P_OTHERS); tmp |= S5P_OTHER_SYSC_INTOFF; __raw_writel(tmp, S5P_OTHERS); s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); } /* * Suspend operations. 
*/ static int s5pv210_suspend_enter(suspend_state_t state) { u32 eint_wakeup_mask = s5pv210_read_eint_wakeup_mask(); int ret; S3C_PMDBG("%s: suspending the system...\n", __func__); S3C_PMDBG("%s: wakeup masks: %08x,%08x\n", __func__, s5pv210_irqwake_intmask, eint_wakeup_mask); if (s5pv210_irqwake_intmask == -1U && eint_wakeup_mask == -1U) { pr_err("%s: No wake-up sources!\n", __func__); pr_err("%s: Aborting sleep\n", __func__); return -EINVAL; } s3c_pm_save_uarts(false); s5pv210_pm_prepare(); flush_cache_all(); s3c_pm_check_store(); ret = cpu_suspend(0, s5pv210_cpu_suspend); if (ret) return ret; s3c_pm_restore_uarts(false); S3C_PMDBG("%s: wakeup stat: %08x\n", __func__, __raw_readl(S5P_WAKEUP_STAT)); s3c_pm_check_restore(); S3C_PMDBG("%s: resuming the system...\n", __func__); return 0; } static int s5pv210_suspend_prepare(void) { s3c_pm_check_prepare(); return 0; } static void s5pv210_suspend_finish(void) { s3c_pm_check_cleanup(); } static const struct platform_suspend_ops s5pv210_suspend_ops = { .enter = s5pv210_suspend_enter, .prepare = s5pv210_suspend_prepare, .finish = s5pv210_suspend_finish, .valid = suspend_valid_only_mem, }; /* * Syscore operations used to delay restore of certain registers. */ static void s5pv210_pm_resume(void) { s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); } static struct syscore_ops s5pv210_pm_syscore_ops = { .resume = s5pv210_pm_resume, }; /* * Initialization entry point. */ void __init s5pv210_pm_init(void) { register_syscore_ops(&s5pv210_pm_syscore_ops); suspend_set_ops(&s5pv210_suspend_ops); }
linux-master
arch/arm/mach-s5pv210/pm.c
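/*
 * Illustrative sketch, stand-alone C rather than kernel code: the
 * sleep_save table above is a generic "snapshot registers before sleep,
 * replay them after resume" idiom. Here the MMIO is replaced by a plain
 * variable so the mechanics are visible and testable on any host.
 */
#include <stdio.h>

struct sleep_save {
	unsigned int *reg;
	unsigned int val;
};

static void do_save(struct sleep_save *p, int n)
{
	for (; n > 0; n--, p++)
		p->val = *p->reg;
}

static void do_restore(const struct sleep_save *p, int n)
{
	for (; n > 0; n--, p++)
		*p->reg = p->val;
}

int main(void)
{
	unsigned int fake_reg = 0xa5;
	struct sleep_save save[] = { { .reg = &fake_reg } };

	do_save(save, 1);
	fake_reg = 0;				/* "lost" across suspend */
	do_restore(save, 1);
	printf("restored %#x\n", fake_reg);	/* prints 0xa5 */
	return 0;
}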
// SPDX-License-Identifier: GPL-2.0 // // Samsung's S5PC110/S5PV210 flattened device tree enabled machine. // // Copyright (c) 2013-2014 Samsung Electronics Co., Ltd. // Mateusz Krawczuk <[email protected]> // Tomasz Figa <[email protected]> #include <linux/of_fdt.h> #include <linux/platform_device.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/system_misc.h> #include "common.h" #include "regs-clock.h" static int __init s5pv210_fdt_map_sys(unsigned long node, const char *uname, int depth, void *data) { struct map_desc iodesc; const __be32 *reg; int len; if (!of_flat_dt_is_compatible(node, "samsung,s5pv210-clock")) return 0; reg = of_get_flat_dt_prop(node, "reg", &len); if (reg == NULL || len != (sizeof(unsigned long) * 2)) return 0; iodesc.pfn = __phys_to_pfn(be32_to_cpu(reg[0])); iodesc.length = be32_to_cpu(reg[1]) - 1; iodesc.virtual = (unsigned long)S3C_VA_SYS; iodesc.type = MT_DEVICE; iotable_init(&iodesc, 1); return 1; } static void __init s5pv210_dt_map_io(void) { debug_ll_io_init(); of_scan_flat_dt(s5pv210_fdt_map_sys, NULL); } static void s5pv210_dt_restart(enum reboot_mode mode, const char *cmd) { __raw_writel(0x1, S5P_SWRESET); } static void __init s5pv210_dt_init_late(void) { platform_device_register_simple("s5pv210-cpufreq", -1, NULL, 0); s5pv210_pm_init(); } static char const *const s5pv210_dt_compat[] __initconst = { "samsung,s5pc110", "samsung,s5pv210", NULL }; DT_MACHINE_START(S5PV210_DT, "Samsung S5PC110/S5PV210-based board") .dt_compat = s5pv210_dt_compat, .map_io = s5pv210_dt_map_io, .restart = s5pv210_dt_restart, .init_late = s5pv210_dt_init_late, MACHINE_END
linux-master
arch/arm/mach-s5pv210/s5pv210.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011 Google, Inc. * * Author: * Colin Cross <[email protected]> * * Copyright (C) 2010,2013, NVIDIA Corporation */ #include <linux/cpu_pm.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/of_address.h> #include <linux/of.h> #include <linux/syscore_ops.h> #include <soc/tegra/irq.h> #include "board.h" #include "iomap.h" #define SGI_MASK 0xFFFF #ifdef CONFIG_PM_SLEEP static void __iomem *tegra_gic_cpu_base; #endif bool tegra_pending_sgi(void) { u32 pending_set; void __iomem *distbase = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE); pending_set = readl_relaxed(distbase + GIC_DIST_PENDING_SET); if (pending_set & SGI_MASK) return true; return false; } #ifdef CONFIG_PM_SLEEP static int tegra_gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) { switch (cmd) { case CPU_PM_ENTER: writel_relaxed(0x1E0, tegra_gic_cpu_base + GIC_CPU_CTRL); break; } return NOTIFY_OK; } static struct notifier_block tegra_gic_notifier_block = { .notifier_call = tegra_gic_notifier, }; static const struct of_device_id tegra114_dt_gic_match[] __initconst = { { .compatible = "arm,cortex-a15-gic" }, { } }; static void __init tegra114_gic_cpu_pm_registration(void) { struct device_node *dn; dn = of_find_matching_node(NULL, tegra114_dt_gic_match); if (!dn) return; tegra_gic_cpu_base = of_iomap(dn, 1); cpu_pm_register_notifier(&tegra_gic_notifier_block); } #else static void __init tegra114_gic_cpu_pm_registration(void) { } #endif static const struct of_device_id tegra_ictlr_match[] __initconst = { { .compatible = "nvidia,tegra20-ictlr" }, { .compatible = "nvidia,tegra30-ictlr" }, { } }; void __init tegra_init_irq(void) { if (WARN_ON(!of_find_matching_node(NULL, tegra_ictlr_match))) pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); tegra114_gic_cpu_pm_registration(); }
linux-master
arch/arm/mach-tegra/irq.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-tegra/io.c * * Copyright (C) 2010 Google, Inc. * * Author: * Colin Cross <[email protected]> * Erik Gilling <[email protected]> */ #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <asm/mach/map.h> #include <asm/page.h> #include "board.h" #include "iomap.h" static struct map_desc tegra_io_desc[] __initdata = { { .virtual = (unsigned long)IO_PPSB_VIRT, .pfn = __phys_to_pfn(IO_PPSB_PHYS), .length = IO_PPSB_SIZE, .type = MT_DEVICE, }, { .virtual = (unsigned long)IO_APB_VIRT, .pfn = __phys_to_pfn(IO_APB_PHYS), .length = IO_APB_SIZE, .type = MT_DEVICE, }, { .virtual = (unsigned long)IO_CPU_VIRT, .pfn = __phys_to_pfn(IO_CPU_PHYS), .length = IO_CPU_SIZE, .type = MT_DEVICE, }, { .virtual = (unsigned long)IO_IRAM_VIRT, .pfn = __phys_to_pfn(IO_IRAM_PHYS), .length = IO_IRAM_SIZE, .type = MT_DEVICE, }, }; void __init tegra_map_common_io(void) { debug_ll_io_init(); iotable_init(tegra_io_desc, ARRAY_SIZE(tegra_io_desc)); }
linux-master
arch/arm/mach-tegra/io.c
// SPDX-License-Identifier: GPL-2.0-only /* * NVIDIA Tegra SoC device tree board support * * Copyright (C) 2011, 2013, NVIDIA Corporation * Copyright (C) 2010 Secret Lab Technologies, Ltd. * Copyright (C) 2010 Google, Inc. */ #include <linux/clk.h> #include <linux/clk/tegra.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/slab.h> #include <linux/sys_soc.h> #include <linux/usb/tegra_usb_phy.h> #include <linux/firmware/trusted_foundations.h> #include <soc/tegra/fuse.h> #include <soc/tegra/pmc.h> #include <asm/firmware.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/mach-types.h> #include <asm/psci.h> #include <asm/setup.h> #include "board.h" #include "common.h" #include "iomap.h" #include "pm.h" #include "reset.h" #include "sleep.h" /* * Storage for debug-macro.S's state. * * This must be in .data not .bss so that it gets initialized each time the * kernel is loaded. The data is declared here rather than debug-macro.S so * that multiple inclusions of debug-macro.S point at the same data. */ u32 tegra_uart_config[3] = { /* Debug UART initialization required */ 1, /* Debug UART physical address */ 0, /* Debug UART virtual address */ 0, }; static void __init tegra_init_early(void) { of_register_trusted_foundations(); tegra_cpu_reset_handler_init(); call_firmware_op(l2x0_init); } static void __init tegra_dt_init_irq(void) { tegra_init_irq(); irqchip_init(); } static void __init tegra_dt_init(void) { struct device *parent = tegra_soc_device_register(); of_platform_default_populate(NULL, NULL, parent); } static void __init tegra_dt_init_late(void) { if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && of_machine_is_compatible("compal,paz00")) tegra_paz00_wifikill_init(); if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && of_machine_is_compatible("nvidia,tegra20")) platform_device_register_simple("tegra20-cpufreq", -1, NULL, 0); if (IS_ENABLED(CONFIG_ARM_TEGRA_CPUIDLE) && !psci_smp_available()) platform_device_register_simple("tegra-cpuidle", -1, NULL, 0); if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) && of_machine_is_compatible("nvidia,tegra30")) platform_device_register_simple("tegra20-cpufreq", -1, NULL, 0); } static const char * const tegra_dt_board_compat[] = { "nvidia,tegra124", "nvidia,tegra114", "nvidia,tegra30", "nvidia,tegra20", NULL }; DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") .l2c_aux_val = 0x3c400000, .l2c_aux_mask = 0xc20fc3ff, .smp = smp_ops(tegra_smp_ops), .map_io = tegra_map_common_io, .init_early = tegra_init_early, .init_irq = tegra_dt_init_irq, .init_machine = tegra_dt_init, .init_late = tegra_dt_init_late, .dt_compat = tegra_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-tegra/tegra.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-tegra/board-paz00.c * * Copyright (C) 2011 Marc Dietrich <[email protected]> * * Based on board-harmony.c * Copyright (C) 2010 Google, Inc. */ #include <linux/property.h> #include <linux/gpio/machine.h> #include <linux/platform_device.h> #include "board.h" static struct property_entry wifi_rfkill_prop[] __initdata = { PROPERTY_ENTRY_STRING("name", "wifi_rfkill"), PROPERTY_ENTRY_STRING("type", "wlan"), { }, }; static struct platform_device wifi_rfkill_device = { .name = "rfkill_gpio", .id = -1, }; static struct gpiod_lookup_table wifi_gpio_lookup = { .dev_id = "rfkill_gpio", .table = { GPIO_LOOKUP("tegra-gpio", 25, "reset", 0), GPIO_LOOKUP("tegra-gpio", 85, "shutdown", 0), { }, }, }; void __init tegra_paz00_wifikill_init(void) { device_create_managed_software_node(&wifi_rfkill_device.dev, wifi_rfkill_prop, NULL); gpiod_add_lookup_table(&wifi_gpio_lookup); platform_device_register(&wifi_rfkill_device); }
linux-master
arch/arm/mach-tegra/board-paz00.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-tegra/reset.c * * Copyright (C) 2011,2012 NVIDIA Corporation. */ #include <linux/bitops.h> #include <linux/cpumask.h> #include <linux/init.h> #include <linux/io.h> #include <linux/firmware/trusted_foundations.h> #include <soc/tegra/fuse.h> #include <asm/cacheflush.h> #include <asm/firmware.h> #include <asm/hardware/cache-l2x0.h> #include "iomap.h" #include "irammap.h" #include "reset.h" #include "sleep.h" #define TEGRA_IRAM_RESET_BASE (TEGRA_IRAM_BASE + \ TEGRA_IRAM_RESET_HANDLER_OFFSET) static bool is_enabled; static void __init tegra_cpu_reset_handler_set(const u32 reset_address) { void __iomem *evp_cpu_reset = IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE + 0x100); void __iomem *sb_ctrl = IO_ADDRESS(TEGRA_SB_BASE); u32 reg; /* * NOTE: This must be the one and only write to the EVP CPU reset * vector in the entire system. */ writel(reset_address, evp_cpu_reset); wmb(); reg = readl(evp_cpu_reset); /* * Prevent further modifications to the physical reset vector. * NOTE: Has no effect on chips prior to Tegra30. */ reg = readl(sb_ctrl); reg |= 2; writel(reg, sb_ctrl); wmb(); } static void __init tegra_cpu_reset_handler_enable(void) { void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE); const u32 reset_address = TEGRA_IRAM_RESET_BASE + tegra_cpu_reset_handler_offset; int err; BUG_ON(is_enabled); BUG_ON(tegra_cpu_reset_handler_size > TEGRA_IRAM_RESET_HANDLER_SIZE); memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start, tegra_cpu_reset_handler_size); err = call_firmware_op(set_cpu_boot_addr, 0, reset_address); switch (err) { case -ENOSYS: tegra_cpu_reset_handler_set(reset_address); fallthrough; case 0: is_enabled = true; break; default: pr_crit("Cannot set CPU reset handler: %d\n", err); BUG(); } } void __init tegra_cpu_reset_handler_init(void) { __tegra_cpu_reset_handler_data[TEGRA_RESET_TF_PRESENT] = trusted_foundations_registered(); #ifdef CONFIG_SMP __tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] = *((u32 *)cpu_possible_mask); __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] = __pa_symbol((void *)secondary_startup); #endif #ifdef CONFIG_PM_SLEEP __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] = TEGRA_IRAM_LPx_RESUME_AREA; __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] = __pa_symbol((void *)tegra_resume); #endif tegra_cpu_reset_handler_enable(); }
linux-master
arch/arm/mach-tegra/reset.c
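/*
 * Illustrative sketch, stand-alone C rather than kernel code:
 * tegra_cpu_reset_handler_enable() above tries the secure firmware
 * first and only falls back to writing the reset vector directly when
 * the op is unimplemented (-ENOSYS), using fallthrough so both paths
 * share the "enabled" bookkeeping. A hedged generic form of that shape:
 */
#include <errno.h>
#include <stdbool.h>

static bool handler_enabled;

static int enable_reset_handler(int (*fw_set)(unsigned int addr),
				void (*direct_set)(unsigned int addr),
				unsigned int addr)
{
	switch (fw_set(addr)) {
	case -ENOSYS:		/* no secure firmware: do it ourselves */
		direct_set(addr);
		/* fall through */
	case 0:
		handler_enabled = true;
		return 0;
	default:		/* firmware present but refused: fatal */
		return -EIO;
	}
}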
// SPDX-License-Identifier: GPL-2.0-only /* * CPU complex suspend & resume functions for Tegra SoCs * * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved. */ #include <linux/clk/tegra.h> #include <linux/cpumask.h> #include <linux/cpu_pm.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/suspend.h> #include <linux/firmware/trusted_foundations.h> #include <soc/tegra/flowctrl.h> #include <soc/tegra/fuse.h> #include <soc/tegra/pm.h> #include <soc/tegra/pmc.h> #include <asm/cacheflush.h> #include <asm/firmware.h> #include <asm/idmap.h> #include <asm/proc-fns.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/tlbflush.h> #include "iomap.h" #include "pm.h" #include "reset.h" #include "sleep.h" #ifdef CONFIG_PM_SLEEP static DEFINE_SPINLOCK(tegra_lp2_lock); static u32 iram_save_size; static void *iram_save_addr; struct tegra_lp1_iram tegra_lp1_iram; void (*tegra_tear_down_cpu)(void); void (*tegra_sleep_core_finish)(unsigned long v2p); static int (*tegra_sleep_func)(unsigned long v2p); static void tegra_tear_down_cpu_init(void) { switch (tegra_get_chip_id()) { case TEGRA20: if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC)) tegra_tear_down_cpu = tegra20_tear_down_cpu; break; case TEGRA30: case TEGRA114: case TEGRA124: if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)) tegra_tear_down_cpu = tegra30_tear_down_cpu; break; } } /* * restore_cpu_complex * * restores cpu clock setting, clears flow controller * * Always called on CPU 0. */ static void restore_cpu_complex(void) { int cpu = smp_processor_id(); BUG_ON(cpu != 0); #ifdef CONFIG_SMP cpu = cpu_logical_map(cpu); #endif /* Restore the CPU clock settings */ tegra_cpu_clock_resume(); flowctrl_cpu_suspend_exit(cpu); } /* * suspend_cpu_complex * * saves pll state for use by restart_plls, prepares flow controller for * transition to suspend state * * Must always be called on cpu 0. */ static void suspend_cpu_complex(void) { int cpu = smp_processor_id(); BUG_ON(cpu != 0); #ifdef CONFIG_SMP cpu = cpu_logical_map(cpu); #endif /* Save the CPU clock settings */ tegra_cpu_clock_suspend(); flowctrl_cpu_suspend_enter(cpu); } void tegra_pm_clear_cpu_in_lp2(void) { int phy_cpu_id = cpu_logical_map(smp_processor_id()); u32 *cpu_in_lp2 = tegra_cpu_lp2_mask; spin_lock(&tegra_lp2_lock); BUG_ON(!(*cpu_in_lp2 & BIT(phy_cpu_id))); *cpu_in_lp2 &= ~BIT(phy_cpu_id); spin_unlock(&tegra_lp2_lock); } void tegra_pm_set_cpu_in_lp2(void) { int phy_cpu_id = cpu_logical_map(smp_processor_id()); u32 *cpu_in_lp2 = tegra_cpu_lp2_mask; spin_lock(&tegra_lp2_lock); BUG_ON((*cpu_in_lp2 & BIT(phy_cpu_id))); *cpu_in_lp2 |= BIT(phy_cpu_id); spin_unlock(&tegra_lp2_lock); } static int tegra_sleep_cpu(unsigned long v2p) { if (tegra_cpu_car_ops->rail_off_ready && WARN_ON(!tegra_cpu_rail_off_ready())) return -EBUSY; /* * L2 cache disabling using kernel API only allowed when all * secondary CPU's are offline. Cache have to be disabled with * MMU-on if cache maintenance is done via Trusted Foundations * firmware. Note that CPUIDLE won't ever enter powergate on Tegra30 * if any of secondary CPU's is online and this is the LP2-idle * code-path only for Tegra20/30. 
*/ #ifdef CONFIG_OUTER_CACHE if (trusted_foundations_registered() && outer_cache.disable) outer_cache.disable(); #endif /* * Note that besides of setting up CPU reset vector this firmware * call may also do the following, depending on the FW version: * 1) Disable L2. But this doesn't matter since we already * disabled the L2. * 2) Disable D-cache. This need to be taken into account in * particular by the tegra_disable_clean_inv_dcache() which * shall avoid the re-disable. */ call_firmware_op(prepare_idle, TF_PM_MODE_LP2); setup_mm_for_reboot(); tegra_sleep_cpu_finish(v2p); /* should never here */ BUG(); return 0; } static void tegra_pm_set(enum tegra_suspend_mode mode) { u32 value; switch (tegra_get_chip_id()) { case TEGRA20: case TEGRA30: break; default: /* Turn off CRAIL */ value = flowctrl_read_cpu_csr(0); value &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK; value |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL; flowctrl_write_cpu_csr(0, value); break; } tegra_pmc_enter_suspend_mode(mode); } int tegra_pm_enter_lp2(void) { int err; tegra_pm_set(TEGRA_SUSPEND_LP2); cpu_cluster_pm_enter(); suspend_cpu_complex(); err = cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu); /* * Resume L2 cache if it wasn't re-enabled early during resume, * which is the case for Tegra30 that has to re-enable the cache * via firmware call. In other cases cache is already enabled and * hence re-enabling is a no-op. This is always a no-op on Tegra114+. */ outer_resume(); restore_cpu_complex(); cpu_cluster_pm_exit(); call_firmware_op(prepare_idle, TF_PM_MODE_NONE); return err; } enum tegra_suspend_mode tegra_pm_validate_suspend_mode( enum tegra_suspend_mode mode) { /* * The Tegra devices support suspending to LP1 or lower currently. */ if (mode > TEGRA_SUSPEND_LP1) return TEGRA_SUSPEND_LP1; return mode; } static int tegra_sleep_core(unsigned long v2p) { /* * Cache have to be disabled with MMU-on if cache maintenance is done * via Trusted Foundations firmware. This is a no-op on Tegra114+. */ if (trusted_foundations_registered()) outer_disable(); call_firmware_op(prepare_idle, TF_PM_MODE_LP1); setup_mm_for_reboot(); tegra_sleep_core_finish(v2p); /* should never here */ BUG(); return 0; } /* * tegra_lp1_iram_hook * * Hooking the address of LP1 reset vector and SDRAM self-refresh code in * SDRAM. These codes not be copied to IRAM in this fuction. We need to * copy these code to IRAM before LP0/LP1 suspend and restore the content * of IRAM after resume. 
*/ static bool tegra_lp1_iram_hook(void) { switch (tegra_get_chip_id()) { case TEGRA20: if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC)) tegra20_lp1_iram_hook(); break; case TEGRA30: case TEGRA114: case TEGRA124: if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)) tegra30_lp1_iram_hook(); break; default: break; } if (!tegra_lp1_iram.start_addr || !tegra_lp1_iram.end_addr) return false; iram_save_size = tegra_lp1_iram.end_addr - tegra_lp1_iram.start_addr; iram_save_addr = kmalloc(iram_save_size, GFP_KERNEL); if (!iram_save_addr) return false; return true; } static bool tegra_sleep_core_init(void) { switch (tegra_get_chip_id()) { case TEGRA20: if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC)) tegra20_sleep_core_init(); break; case TEGRA30: case TEGRA114: case TEGRA124: if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)) tegra30_sleep_core_init(); break; default: break; } if (!tegra_sleep_core_finish) return false; return true; } static void tegra_suspend_enter_lp1(void) { /* copy the reset vector & SDRAM shutdown code into IRAM */ memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_size); memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), tegra_lp1_iram.start_addr, iram_save_size); *((u32 *)tegra_cpu_lp1_mask) = 1; } static void tegra_suspend_exit_lp1(void) { /* restore IRAM */ memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_addr, iram_save_size); *(u32 *)tegra_cpu_lp1_mask = 0; } static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = { [TEGRA_SUSPEND_NONE] = "none", [TEGRA_SUSPEND_LP2] = "LP2", [TEGRA_SUSPEND_LP1] = "LP1", [TEGRA_SUSPEND_LP0] = "LP0", }; static int tegra_suspend_enter(suspend_state_t state) { enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode(); if (WARN_ON(mode < TEGRA_SUSPEND_NONE || mode >= TEGRA_MAX_SUSPEND_MODE)) return -EINVAL; pr_info("Entering suspend state %s\n", lp_state[mode]); tegra_pm_set(mode); local_fiq_disable(); suspend_cpu_complex(); switch (mode) { case TEGRA_SUSPEND_LP1: tegra_suspend_enter_lp1(); break; case TEGRA_SUSPEND_LP2: tegra_pm_set_cpu_in_lp2(); break; default: break; } cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func); /* * Resume L2 cache if it wasn't re-enabled early during resume, * which is the case for Tegra30 that has to re-enable the cache * via firmware call. In other cases cache is already enabled and * hence re-enabling is a no-op. 
	 */
	outer_resume();

	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_suspend_exit_lp1();
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_pm_clear_cpu_in_lp2();
		break;
	default:
		break;
	}
	restore_cpu_complex();

	local_fiq_enable();

	call_firmware_op(prepare_idle, TF_PM_MODE_NONE);

	return 0;
}

static const struct platform_suspend_ops tegra_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = tegra_suspend_enter,
};

void tegra_pm_init_suspend(void)
{
	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();

	if (mode == TEGRA_SUSPEND_NONE)
		return;

	tegra_tear_down_cpu_init();

	if (mode >= TEGRA_SUSPEND_LP1) {
		if (!tegra_lp1_iram_hook() || !tegra_sleep_core_init()) {
			pr_err("%s: unable to allocate memory for SDRAM "
			       "self-refresh -- LP0/LP1 unavailable\n",
			       __func__);
			tegra_pmc_set_suspend_mode(TEGRA_SUSPEND_LP2);
			mode = TEGRA_SUSPEND_LP2;
		}
	}

	/* set up sleep function for cpu_suspend */
	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_sleep_func = tegra_sleep_core;
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_sleep_func = tegra_sleep_cpu;
		break;
	default:
		break;
	}

	suspend_set_ops(&tegra_suspend_ops);
}

int tegra_pm_park_secondary_cpu(unsigned long cpu)
{
	if (cpu > 0) {
		tegra_disable_clean_inv_dcache(TEGRA_FLUSH_CACHE_LOUIS);

		if (tegra_get_chip_id() == TEGRA20)
			tegra20_hotplug_shutdown();
		else
			tegra30_hotplug_shutdown();
	}

	return -EINVAL;
}
#endif
linux-master
arch/arm/mach-tegra/pm.c
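The pm.c code above hands its suspend callbacks to the generic suspend core through suspend_set_ops(). A minimal sketch of that same registration pattern, with a hypothetical my_suspend_enter() standing in for tegra_suspend_enter() (names are illustrative, not from the source):

#include <linux/suspend.h>

/* Hypothetical platform entry point; real power-down sequencing elided. */
static int my_suspend_enter(suspend_state_t state)
{
	/* Quiesce the platform, then cut power/clocks here. */
	return 0;
}

static const struct platform_suspend_ops my_suspend_ops = {
	.valid = suspend_valid_only_mem,	/* only "mem" is supported */
	.enter = my_suspend_enter,
};

static int __init my_pm_init(void)
{
	/* Same hand-off tegra_pm_init_suspend() performs at the end. */
	suspend_set_ops(&my_suspend_ops);
	return 0;
}

After this registration, a userspace "echo mem > /sys/power/state" ends up in the .enter callback.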
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
 */

#include <linux/kernel.h>

#include "pm.h"

#ifdef CONFIG_PM_SLEEP
extern u32 tegra30_iram_start, tegra30_iram_end;
extern void tegra30_sleep_core_finish(unsigned long);

void tegra30_lp1_iram_hook(void)
{
	tegra_lp1_iram.start_addr = &tegra30_iram_start;
	tegra_lp1_iram.end_addr = &tegra30_iram_end;
}

void tegra30_sleep_core_init(void)
{
	tegra_sleep_core_finish = tegra30_sleep_core_finish;
}
#endif
linux-master
arch/arm/mach-tegra/pm-tegra30.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-tegra/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * Copyright (C) 2009 Palm
 * All Rights Reserved
 */

#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>

#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>

#include "common.h"
#include "iomap.h"
#include "reset.h"

static cpumask_t tegra_cpu_init_mask;

static void tegra_secondary_init(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
}

static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	cpu = cpu_logical_map(cpu);

	/*
	 * Force the CPU into reset. The CPU must remain in reset when
	 * the flow controller state is cleared (which will cause the
	 * flow controller to stop driving reset if the CPU has been
	 * power-gated via the flow controller). This will have no
	 * effect on first boot of the CPU since it should already be
	 * in reset.
	 */
	tegra_put_cpu_in_reset(cpu);

	/*
	 * Unhalt the CPU. If the flow controller was used to
	 * power-gate the CPU this will cause the flow controller to
	 * stop driving reset. The CPU will remain in reset because the
	 * clock and reset block is now driving reset.
	 */
	flowctrl_write_cpu_halt(cpu, 0);

	tegra_enable_cpu_clock(cpu);
	flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
	tegra_cpu_out_of_reset(cpu);
	return 0;
}

static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	unsigned long timeout;

	cpu = cpu_logical_map(cpu);
	tegra_put_cpu_in_reset(cpu);
	flowctrl_write_cpu_halt(cpu, 0);

	/*
	 * The power-up sequences of a cold-booted CPU and a warm-booted
	 * CPU are different.
	 *
	 * For a warm-booted CPU that is resumed from CPU hotplug, the
	 * power will be resumed automatically after un-halting its flow
	 * controller. We need to wait for confirmation that the CPU is
	 * powered before removing the I/O clamps.
	 * For a cold-booted CPU, do not wait. Once the cold-booted CPU
	 * is up, it will run tegra_secondary_init() and set
	 * tegra_cpu_init_mask, which influences what
	 * tegra30_boot_secondary() does next time around.
	 */
	if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) {
		timeout = jiffies + msecs_to_jiffies(50);
		do {
			if (tegra_pmc_cpu_is_powered(cpu))
				goto remove_clamps;
			udelay(10);
		} while (time_before(jiffies, timeout));
	}

	/*
	 * The cold-booted CPU is power-gated by default. To power it
	 * up, the power must be un-gated by toggling the power gate
	 * register manually.
	 */
	ret = tegra_pmc_cpu_power_on(cpu);
	if (ret)
		return ret;

remove_clamps:
	/* CPU partition is powered. Enable the CPU clock. */
	tegra_enable_cpu_clock(cpu);
	udelay(10);

	/* Remove I/O clamps. */
	ret = tegra_pmc_cpu_remove_clamping(cpu);
	if (ret)
		return ret;

	udelay(10);

	flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
	tegra_cpu_out_of_reset(cpu);
	return 0;
}

static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	cpu = cpu_logical_map(cpu);

	if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) {
		/*
		 * Warm boot flow
		 * The flow controller is in charge of the power state and
		 * control for each CPU.
		 */
		/* set SCLK as event trigger for flow controller */
		flowctrl_write_cpu_csr(cpu, 1);
		flowctrl_write_cpu_halt(cpu,
				FLOW_CTRL_WAITEVENT | FLOW_CTRL_SCLK_RESUME);
	} else {
		/*
		 * Cold boot flow
		 * The CPU is powered up by toggling the PMC directly. This
		 * also initializes the power state in the flow controller.
		 * After that, the CPU's power state is maintained by the
		 * flow controller.
		 */
		ret = tegra_pmc_cpu_power_on(cpu);
	}

	return ret;
}

static int tegra_boot_secondary(unsigned int cpu,
				struct task_struct *idle)
{
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_get_chip_id() == TEGRA20)
		return tegra20_boot_secondary(cpu, idle);
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) && tegra_get_chip_id() == TEGRA30)
		return tegra30_boot_secondary(cpu, idle);
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) && tegra_get_chip_id() == TEGRA114)
		return tegra114_boot_secondary(cpu, idle);
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) && tegra_get_chip_id() == TEGRA124)
		return tegra114_boot_secondary(cpu, idle);

	return -EINVAL;
}

static void __init tegra_smp_prepare_cpus(unsigned int max_cpus)
{
	/* Always mark the boot CPU (CPU0) as initialized. */
	cpumask_set_cpu(0, &tegra_cpu_init_mask);

	if (scu_a9_has_base())
		scu_enable(IO_ADDRESS(scu_a9_get_base()));
}

const struct smp_operations tegra_smp_ops __initconst = {
	.smp_prepare_cpus	= tegra_smp_prepare_cpus,
	.smp_secondary_init	= tegra_secondary_init,
	.smp_boot_secondary	= tegra_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= tegra_cpu_kill,
	.cpu_die		= tegra_cpu_die,
#endif
};
linux-master
arch/arm/mach-tegra/platsmp.c
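tegra_smp_ops above is not self-registering; on 32-bit ARM the machine descriptor names it via smp_ops(). A sketch of that wiring under a hypothetical board entry (the machine name and compatible list are illustrative, not taken from the source):

#include <asm/mach/arch.h>
#include <asm/smp.h>

extern const struct smp_operations tegra_smp_ops;

static const char * const example_tegra_dt_compat[] = {
	"nvidia,tegra124",	/* example compatible only */
	NULL,
};

DT_MACHINE_START(EXAMPLE_TEGRA, "Example Tegra board")
	/* Point the SMP core at the boot/hotplug hooks defined above. */
	.smp		= smp_ops(tegra_smp_ops),
	.dt_compat	= example_tegra_dt_compat,
MACHINE_END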
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2010, 2012-2013, NVIDIA Corporation. All rights reserved.
 */
#include <linux/clk/tegra.h>
#include <linux/kernel.h>
#include <linux/smp.h>

#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>

#include <asm/smp_plat.h>

#include "common.h"
#include "sleep.h"

static void (*tegra_hotplug_shutdown)(void);

int tegra_cpu_kill(unsigned cpu)
{
	cpu = cpu_logical_map(cpu);

	/* Clock gate the CPU */
	tegra_wait_cpu_in_reset(cpu);
	tegra_disable_cpu_clock(cpu);

	return 1;
}

/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void tegra_cpu_die(unsigned int cpu)
{
	if (!tegra_hotplug_shutdown) {
		WARN(1, "hotplug is not yet initialized\n");
		return;
	}

	/* Clean L1 data cache */
	tegra_disable_clean_inv_dcache(TEGRA_FLUSH_CACHE_LOUIS);

	/* Shut down the current CPU. */
	tegra_hotplug_shutdown();

	/* Should never return here. */
	BUG();
}

static int __init tegra_hotplug_init(void)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return 0;

	if (!soc_is_tegra())
		return 0;

	if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_get_chip_id() == TEGRA20)
		tegra_hotplug_shutdown = tegra20_hotplug_shutdown;
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) && tegra_get_chip_id() == TEGRA30)
		tegra_hotplug_shutdown = tegra30_hotplug_shutdown;
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) && tegra_get_chip_id() == TEGRA114)
		tegra_hotplug_shutdown = tegra30_hotplug_shutdown;
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) && tegra_get_chip_id() == TEGRA124)
		tegra_hotplug_shutdown = tegra30_hotplug_shutdown;

	return 0;
}
pure_initcall(tegra_hotplug_init);
linux-master
arch/arm/mach-tegra/hotplug.c
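tegra_cpu_die() above runs on the dying CPU when userspace offlines it through the CPU hotplug sysfs interface. A minimal userspace sketch of triggering that path (the CPU number is just an example):

/* Userspace, not kernel code: offline CPU 1 via sysfs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu1/online", "w");

	if (!f) {
		perror("open");
		return 1;
	}
	fputc('0', f);	/* '0' offlines the CPU, '1' brings it back online */
	fclose(f);
	return 0;
}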
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
 */

#include <linux/kernel.h>

#include "pm.h"

#ifdef CONFIG_PM_SLEEP
extern u32 tegra20_iram_start, tegra20_iram_end;
extern void tegra20_sleep_core_finish(unsigned long);

void tegra20_lp1_iram_hook(void)
{
	tegra_lp1_iram.start_addr = &tegra20_iram_start;
	tegra_lp1_iram.end_addr = &tegra20_iram_end;
}

void tegra20_sleep_core_init(void)
{
	tegra_sleep_core_finish = tegra20_sleep_core_finish;
}
#endif
linux-master
arch/arm/mach-tegra/pm-tegra20.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Samsung Electronics.
 * Kyungmin Park <[email protected]>
 * Tomasz Figa <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/suspend.h>

#include <asm/firmware.h>

static const struct firmware_ops default_firmware_ops;

const struct firmware_ops *firmware_ops = &default_firmware_ops;
linux-master
arch/arm/common/firmware.c
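firmware.c above only provides the empty default ops; platforms install their own table with register_firmware_ops() and callers reach it via call_firmware_op(), which returns -ENOSYS when a hook is absent (this is how the Tegra pm.c code earlier tolerates missing firmware). A hedged sketch with a hypothetical handler; the actual secure-monitor trap is elided:

#include <linux/errno.h>
#include <linux/init.h>

#include <asm/firmware.h>

/* Hypothetical hook: a real one would trap into secure firmware here. */
static int example_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
{
	return 0;
}

static const struct firmware_ops example_firmware_ops = {
	.set_cpu_boot_addr = example_set_cpu_boot_addr,
};

static void __init example_firmware_init(void)
{
	/* Replaces default_firmware_ops for all call_firmware_op() users. */
	register_firmware_ops(&example_firmware_ops);
}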
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
 *
 * Created by:	Nicolas Pitre, November 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * Dummy interface to user space for debugging purpose only.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <asm/bL_switcher.h>

static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
				 size_t len, loff_t *pos)
{
	unsigned char val[3];
	unsigned int cpu, cluster;
	int ret;

	pr_debug("%s\n", __func__);

	if (len < 3)
		return -EINVAL;

	if (copy_from_user(val, buf, 3))
		return -EFAULT;

	/* format: <cpu#>,<cluster#> */
	if (val[0] < '0' || val[0] > '9' ||
	    val[1] != ',' ||
	    val[2] < '0' || val[2] > '1')
		return -EINVAL;

	cpu = val[0] - '0';
	cluster = val[2] - '0';
	ret = bL_switch_request(cpu, cluster);

	return ret ? : len;
}

static const struct file_operations bL_switcher_fops = {
	.write = bL_switcher_write,
	.owner = THIS_MODULE,
};

static struct miscdevice bL_switcher_device = {
	MISC_DYNAMIC_MINOR,
	"b.L_switcher",
	&bL_switcher_fops
};
module_misc_device(bL_switcher_device);

MODULE_AUTHOR("Nicolas Pitre <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface");
linux-master
arch/arm/common/bL_switcher_dummy_if.c
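The write handler above parses exactly "<cpu#>,<cluster#>". A small userspace sketch of driving it, assuming the misc device appears as /dev/b.L_switcher (device node naming depends on udev):

/* Userspace, not kernel code. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/b.L_switcher", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the switcher to move logical CPU 0 onto cluster 1. */
	if (write(fd, "0,1", 3) < 0)
		perror("write");
	close(fd);
	return 0;
}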
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018, The Linux Foundation. All rights reserved.

#include <linux/spinlock.h>
#include <linux/export.h>

#include <asm/barrier.h>
#include <asm/krait-l2-accessors.h>

static DEFINE_RAW_SPINLOCK(krait_l2_lock);

void krait_set_l2_indirect_reg(u32 addr, u32 val)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&krait_l2_lock, flags);
	/*
	 * Select the L2 window by poking l2cpselr, then write to the window
	 * via l2cpdr.
	 */
	asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
	isb();
	asm volatile ("mcr p15, 3, %0, c15, c0, 7 @ l2cpdr" : : "r" (val));
	isb();

	raw_spin_unlock_irqrestore(&krait_l2_lock, flags);
}
EXPORT_SYMBOL(krait_set_l2_indirect_reg);

u32 krait_get_l2_indirect_reg(u32 addr)
{
	u32 val;
	unsigned long flags;

	raw_spin_lock_irqsave(&krait_l2_lock, flags);
	/*
	 * Select the L2 window by poking l2cpselr, then read from the window
	 * via l2cpdr.
	 */
	asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
	isb();
	asm volatile ("mrc p15, 3, %0, c15, c0, 7 @ l2cpdr" : "=r" (val));

	raw_spin_unlock_irqrestore(&krait_l2_lock, flags);

	return val;
}
EXPORT_SYMBOL(krait_get_l2_indirect_reg);
linux-master
arch/arm/common/krait-l2-accessors.c
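Because the accessors above serialize the window select and the data access under one raw spinlock, each call is atomic with respect to other users, and a caller composes read-modify-write itself. A sketch; the register address and bit below are hypothetical placeholders, not documented Krait L2 registers:

#include <linux/bits.h>
#include <linux/types.h>

#include <asm/krait-l2-accessors.h>

#define EXAMPLE_L2_REG		0x500	/* hypothetical indirect address */
#define EXAMPLE_ENABLE_BIT	BIT(0)	/* hypothetical control bit */

static void example_l2_enable_feature(void)
{
	/* Read the current value, set one bit, write it back. */
	u32 val = krait_get_l2_indirect_reg(EXAMPLE_L2_REG);

	val |= EXAMPLE_ENABLE_BIT;
	krait_set_l2_indirect_reg(EXAMPLE_L2_REG, val);
}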
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/common/locomo.c * * Sharp LoCoMo support * * This file contains all generic LoCoMo support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. * * Based on sa1111.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/hardware/locomo.h> /* LoCoMo Interrupts */ #define IRQ_LOCOMO_KEY (0) #define IRQ_LOCOMO_GPIO (1) #define IRQ_LOCOMO_LT (2) #define IRQ_LOCOMO_SPI (3) /* M62332 output channel selection */ #define M62332_EVR_CH 1 /* M62332 volume channel number */ /* 0 : CH.1 , 1 : CH. 2 */ /* DAC send data */ #define M62332_SLAVE_ADDR 0x4e /* Slave address */ #define M62332_W_BIT 0x00 /* W bit (0 only) */ #define M62332_SUB_ADDR 0x00 /* Sub address */ #define M62332_A_BIT 0x00 /* A bit (0 only) */ /* DAC setup and hold times (expressed in us) */ #define DAC_BUS_FREE_TIME 5 /* 4.7 us */ #define DAC_START_SETUP_TIME 5 /* 4.7 us */ #define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ #define DAC_START_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ #define DAC_DATA_SETUP_TIME 1 /* 250 ns */ #define DAC_DATA_HOLD_TIME 1 /* 300 ns */ #define DAC_LOW_SETUP_TIME 1 /* 300 ns */ #define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ /* the following is the overall data for the locomo chip */ struct locomo { struct device *dev; unsigned long phys; unsigned int irq; int irq_base; spinlock_t lock; void __iomem *base; #ifdef CONFIG_PM void *saved_state; #endif }; struct locomo_dev_info { unsigned long offset; unsigned long length; unsigned int devid; unsigned int irq[1]; const char * name; }; /* All the locomo devices. If offset is non-zero, the mapbase for the * locomo_dev will be set to the chip base plus offset. If offset is * zero, then the mapbase for the locomo_dev will be set to zero. 
An * offset of zero means the device only uses GPIOs or other helper * functions inside this file */ static struct locomo_dev_info locomo_devices[] = { { .devid = LOCOMO_DEVID_KEYBOARD, .irq = { IRQ_LOCOMO_KEY }, .name = "locomo-keyboard", .offset = LOCOMO_KEYBOARD, .length = 16, }, { .devid = LOCOMO_DEVID_FRONTLIGHT, .irq = {}, .name = "locomo-frontlight", .offset = LOCOMO_FRONTLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_BACKLIGHT, .irq = {}, .name = "locomo-backlight", .offset = LOCOMO_BACKLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_AUDIO, .irq = {}, .name = "locomo-audio", .offset = LOCOMO_AUDIO, .length = 4, }, { .devid = LOCOMO_DEVID_LED, .irq = {}, .name = "locomo-led", .offset = LOCOMO_LED, .length = 8, }, { .devid = LOCOMO_DEVID_UART, .irq = {}, .name = "locomo-uart", .offset = 0, .length = 0, }, { .devid = LOCOMO_DEVID_SPI, .irq = {}, .name = "locomo-spi", .offset = LOCOMO_SPI, .length = 0x30, }, }; static void locomo_handler(struct irq_desc *desc) { struct locomo *lchip = irq_desc_get_handler_data(desc); int req, i; /* Acknowledge the parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* check why this interrupt was generated */ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { unsigned int irq; /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { if (req & (0x0100 << i)) { generic_handle_irq(irq); } } } } static void locomo_ack_irq(struct irq_data *d) { } static void locomo_mask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r &= ~(0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static void locomo_unmask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r |= (0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static struct irq_chip locomo_chip = { .name = "LOCOMO", .irq_ack = locomo_ack_irq, .irq_mask = locomo_mask_irq, .irq_unmask = locomo_unmask_irq, }; static void locomo_setup_irq(struct locomo *lchip) { int irq = lchip->irq_base; /* * Install handler for IRQ_LOCOMO_HW. */ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip); /* Install handlers for IRQ_LOCOMO_* */ for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } } static void locomo_dev_release(struct device *_dev) { struct locomo_dev *dev = LOCOMO_DEV(_dev); kfree(dev); } static int locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) { struct locomo_dev *dev; int ret; dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto out; } /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. */ if (lchip->dev->dma_mask) { dev->dma_mask = *lchip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; } dev_set_name(&dev->dev, "%s", info->name); dev->devid = info->devid; dev->dev.parent = lchip->dev; dev->dev.bus = &locomo_bus_type; dev->dev.release = locomo_dev_release; dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; if (info->offset) dev->mapbase = lchip->base + info->offset; else dev->mapbase = 0; dev->length = info->length; dev->irq[0] = (lchip->irq_base == NO_IRQ) ? 
NO_IRQ : lchip->irq_base + info->irq[0]; ret = device_register(&dev->dev); if (ret) { out: kfree(dev); } return ret; } #ifdef CONFIG_PM struct locomo_save_data { u16 LCM_GPO; u16 LCM_SPICT; u16 LCM_GPE; u16 LCM_ASD; u16 LCM_SPIMD; }; static int locomo_suspend(struct platform_device *dev, pm_message_t state) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long flags; save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); if (!save) return -ENOMEM; lchip->saved_state = save; spin_lock_irqsave(&lchip->lock, flags); save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPO); save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPE); save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ locomo_writel(0x00, lchip->base + LOCOMO_ASD); save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_PAIF); locomo_writel(0x00, lchip->base + LOCOMO_DAC); locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ else /* 18MHz already enabled, so no wait */ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ spin_unlock_irqrestore(&lchip->lock, flags); return 0; } static int locomo_resume(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long r; unsigned long flags; save = lchip->saved_state; if (!save) return 0; spin_lock_irqsave(&lchip->lock, flags); locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_C32K); locomo_writel(0x90, lchip->base + LOCOMO_TADC); locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); r &= 0xFEFF; locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); spin_unlock_irqrestore(&lchip->lock, flags); lchip->saved_state = NULL; kfree(save); return 0; } #endif static int __locomo_probe(struct device *me, struct resource *mem, int irq) { struct locomo_platform_data *pdata = me->platform_data; struct locomo *lchip; unsigned long r; int i, ret = -ENODEV; lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); if (!lchip) return -ENOMEM; spin_lock_init(&lchip->lock); lchip->dev = me; dev_set_drvdata(lchip->dev, lchip); lchip->phys = mem->start; lchip->irq = irq; lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; /* * Map the whole region. This also maps the * registers for our children. 
*/ lchip->base = ioremap(mem->start, PAGE_SIZE); if (!lchip->base) { ret = -ENOMEM; goto out; } /* locomo initialize */ locomo_writel(0, lchip->base + LOCOMO_ICR); /* KEYBOARD */ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); /* GPIO */ locomo_writel(0, lchip->base + LOCOMO_GPO); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPE); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPD); locomo_writel(0, lchip->base + LOCOMO_GIE); /* Frontlight */ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); /* Longtime timer */ locomo_writel(0, lchip->base + LOCOMO_LTINT); /* SPI */ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); r = locomo_readl(lchip->base + LOCOMO_ASD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_ASD); locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); r = locomo_readl(lchip->base + LOCOMO_HSD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_HSD); locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); /* XON */ locomo_writel(0x80, lchip->base + LOCOMO_TADC); udelay(1000); /* CLK9MEN */ r = locomo_readl(lchip->base + LOCOMO_TADC); r |= 0x10; locomo_writel(r, lchip->base + LOCOMO_TADC); udelay(100); /* init DAC */ r = locomo_readl(lchip->base + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, lchip->base + LOCOMO_DAC); r = locomo_readl(lchip->base + LOCOMO_VER); printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. */ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) locomo_setup_irq(lchip); for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) locomo_init_one_child(lchip, &locomo_devices[i]); return 0; out: kfree(lchip); return ret; } static int locomo_remove_child(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __locomo_remove(struct locomo *lchip) { device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { irq_set_chained_handler_and_data(lchip->irq, NULL, NULL); } iounmap(lchip->base); kfree(lchip); } /** * locomo_probe - probe for a single LoCoMo chip. * @dev: platform device * * Probe for a LoCoMo chip. This must be called * before any other locomo-specific code. * * Returns: * * %-EINVAL - device's IORESOURCE_MEM not found * * %-ENXIO - could not allocate an IRQ for the device * * %-ENODEV - device not found. * * %-EBUSY - physical address already marked in-use. * * %-ENOMEM - could not allocate or iomap memory. * * %0 - successful. */ static int locomo_probe(struct platform_device *dev) { struct resource *mem; int irq; mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(dev, 0); if (irq < 0) return -ENXIO; return __locomo_probe(&dev->dev, mem, irq); } static void locomo_remove(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); if (lchip) { __locomo_remove(lchip); platform_set_drvdata(dev, NULL); } } /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. 
*/ static struct platform_driver locomo_device_driver = { .probe = locomo_probe, .remove_new = locomo_remove, #ifdef CONFIG_PM .suspend = locomo_suspend, .resume = locomo_resume, #endif .driver = { .name = "locomo", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) { return (struct locomo *)dev_get_drvdata(ldev->dev.parent); } void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPD); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPD); r = locomo_readl(lchip->base + LOCOMO_GPE); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPE); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_set_dir); int locomo_gpio_read_level(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPL); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_level); int locomo_gpio_read_output(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_output); void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPO); if (set) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_write); static void locomo_m62332_sendbit(void *mapbase, int bit) { unsigned int r; r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ if (bit & 1) { r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ } else { r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ } udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ } void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) { struct locomo *lchip = locomo_chip_driver(ldev); int i; unsigned char data; unsigned int r; void *mapbase = lchip->base; unsigned long flags; spin_lock_irqsave(&lchip->lock, flags); /* Start */ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ r = 
locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ /* Send slave address and W bit (LSB is W bit) */ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); goto out; } /* Send Sub address (LSB is channel select) */ /* channel = 0 : ch1 select */ /* = 1 : ch2 select */ data = M62332_SUB_ADDR + channel; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); goto out; } /* Send DAC data */ for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); } out: /* stop */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); 
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_m62332_senddata); /* * Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) { unsigned long flags; struct locomo *lchip = locomo_chip_driver(dev); if (vr) locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); else locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); spin_lock_irqsave(&lchip->lock, flags); locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); udelay(100); locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_frontlight_set); /* * LoCoMo "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. */ static int locomo_match(struct device *_dev, struct device_driver *_drv) { struct locomo_dev *dev = LOCOMO_DEV(_dev); struct locomo_driver *drv = LOCOMO_DRV(_drv); return dev->devid == drv->devid; } static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(ldev); return ret; } static void locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); if (drv->remove) drv->remove(ldev); } struct bus_type locomo_bus_type = { .name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, }; int locomo_driver_register(struct locomo_driver *driver) { driver->drv.bus = &locomo_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(locomo_driver_register); void locomo_driver_unregister(struct locomo_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(locomo_driver_unregister); static int __init locomo_init(void) { int ret = bus_register(&locomo_bus_type); if (ret == 0) platform_driver_register(&locomo_device_driver); return ret; } static void __exit locomo_exit(void) { platform_driver_unregister(&locomo_device_driver); bus_unregister(&locomo_bus_type); } module_init(locomo_init); module_exit(locomo_exit); MODULE_DESCRIPTION("Sharp LoCoMo core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Lenz <[email protected]>");
linux-master
arch/arm/common/locomo.c
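Child drivers bind to the bus registered above purely by devid, as locomo_match() shows. A skeleton child driver with hypothetical names; only .probe is filled in to keep the sketch minimal, and the register window comment reflects the mapbase assignment in locomo_init_one_child():

#include <linux/init.h>
#include <linux/module.h>

#include <asm/hardware/locomo.h>

static int example_led_probe(struct locomo_dev *ldev)
{
	/* ldev->mapbase points at this function block's register window. */
	return 0;
}

static struct locomo_driver example_led_driver = {
	.drv = {
		.name = "example-locomo-led",	/* illustrative name */
	},
	.devid = LOCOMO_DEVID_LED,
	.probe = example_led_probe,
};

static int __init example_led_init(void)
{
	return locomo_driver_register(&example_led_driver);
}
module_init(example_led_init);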
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/common/sa1111.c * * SA1111 support * * Original code by John Dorsey * * This file contains all generic SA1111 support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. */ #include <linux/module.h> #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/dma-map-ops.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/mach/irq.h> #include <asm/mach-types.h> #include <linux/sizes.h> #include <asm/hardware/sa1111.h> #ifdef CONFIG_ARCH_SA1100 #include <mach/hardware.h> #endif /* SA1111 IRQs */ #define IRQ_GPAIN0 (0) #define IRQ_GPAIN1 (1) #define IRQ_GPAIN2 (2) #define IRQ_GPAIN3 (3) #define IRQ_GPBIN0 (4) #define IRQ_GPBIN1 (5) #define IRQ_GPBIN2 (6) #define IRQ_GPBIN3 (7) #define IRQ_GPBIN4 (8) #define IRQ_GPBIN5 (9) #define IRQ_GPCIN0 (10) #define IRQ_GPCIN1 (11) #define IRQ_GPCIN2 (12) #define IRQ_GPCIN3 (13) #define IRQ_GPCIN4 (14) #define IRQ_GPCIN5 (15) #define IRQ_GPCIN6 (16) #define IRQ_GPCIN7 (17) #define IRQ_MSTXINT (18) #define IRQ_MSRXINT (19) #define IRQ_MSSTOPERRINT (20) #define IRQ_TPTXINT (21) #define IRQ_TPRXINT (22) #define IRQ_TPSTOPERRINT (23) #define SSPXMTINT (24) #define SSPRCVINT (25) #define SSPROR (26) #define AUDXMTDMADONEA (32) #define AUDRCVDMADONEA (33) #define AUDXMTDMADONEB (34) #define AUDRCVDMADONEB (35) #define AUDTFSR (36) #define AUDRFSR (37) #define AUDTUR (38) #define AUDROR (39) #define AUDDTS (40) #define AUDRDD (41) #define AUDSTO (42) #define IRQ_USBPWR (43) #define IRQ_HCIM (44) #define IRQ_HCIBUFFACC (45) #define IRQ_HCIRMTWKP (46) #define IRQ_NHCIMFCIR (47) #define IRQ_USB_PORT_RESUME (48) #define IRQ_S0_READY_NINT (49) #define IRQ_S1_READY_NINT (50) #define IRQ_S0_CD_VALID (51) #define IRQ_S1_CD_VALID (52) #define IRQ_S0_BVD1_STSCHG (53) #define IRQ_S1_BVD1_STSCHG (54) #define SA1111_IRQ_NR (55) extern void sa1110_mb_enable(void); extern void sa1110_mb_disable(void); /* * We keep the following data for the overall SA1111. Note that the * struct device and struct resource are "fake"; they should be supplied * by the bus above us. However, in the interests of getting all SA1111 * drivers converted over to the device model, we provide this as an * anchor point for all the other drivers. */ struct sa1111 { struct device *dev; struct clk *clk; unsigned long phys; int irq; int irq_base; /* base for cascaded on-chip IRQs */ spinlock_t lock; void __iomem *base; struct sa1111_platform_data *pdata; struct irq_domain *irqdomain; struct gpio_chip gc; #ifdef CONFIG_PM void *saved_state; #endif }; /* * We _really_ need to eliminate this. Its only users * are the PWM and DMA checking code. 
*/ static struct sa1111 *g_sa1111; struct sa1111_dev_info { unsigned long offset; unsigned long skpcr_mask; bool dma; unsigned int devid; unsigned int hwirq[6]; }; static struct sa1111_dev_info sa1111_devices[] = { { .offset = SA1111_USB, .skpcr_mask = SKPCR_UCLKEN, .dma = true, .devid = SA1111_DEVID_USB, .hwirq = { IRQ_USBPWR, IRQ_HCIM, IRQ_HCIBUFFACC, IRQ_HCIRMTWKP, IRQ_NHCIMFCIR, IRQ_USB_PORT_RESUME }, }, { .offset = 0x0600, .skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN, .dma = true, .devid = SA1111_DEVID_SAC, .hwirq = { AUDXMTDMADONEA, AUDXMTDMADONEB, AUDRCVDMADONEA, AUDRCVDMADONEB }, }, { .offset = 0x0800, .skpcr_mask = SKPCR_SCLKEN, .devid = SA1111_DEVID_SSP, }, { .offset = SA1111_KBD, .skpcr_mask = SKPCR_PTCLKEN, .devid = SA1111_DEVID_PS2_KBD, .hwirq = { IRQ_TPRXINT, IRQ_TPTXINT }, }, { .offset = SA1111_MSE, .skpcr_mask = SKPCR_PMCLKEN, .devid = SA1111_DEVID_PS2_MSE, .hwirq = { IRQ_MSRXINT, IRQ_MSTXINT }, }, { .offset = 0x1800, .skpcr_mask = 0, .devid = SA1111_DEVID_PCMCIA, .hwirq = { IRQ_S0_READY_NINT, IRQ_S0_CD_VALID, IRQ_S0_BVD1_STSCHG, IRQ_S1_READY_NINT, IRQ_S1_CD_VALID, IRQ_S1_BVD1_STSCHG, }, }, }; static int sa1111_map_irq(struct sa1111 *sachip, irq_hw_number_t hwirq) { return irq_create_mapping(sachip->irqdomain, hwirq); } /* * SA1111 interrupt support. Since clearing an IRQ while there are * active IRQs causes the interrupt output to pulse, the upper levels * will call us again if there are more interrupts to process. */ static void sa1111_irq_handler(struct irq_desc *desc) { unsigned int stat0, stat1, i; struct sa1111 *sachip = irq_desc_get_handler_data(desc); struct irq_domain *irqdomain; void __iomem *mapbase = sachip->base + SA1111_INTC; stat0 = readl_relaxed(mapbase + SA1111_INTSTATCLR0); stat1 = readl_relaxed(mapbase + SA1111_INTSTATCLR1); writel_relaxed(stat0, mapbase + SA1111_INTSTATCLR0); desc->irq_data.chip->irq_ack(&desc->irq_data); writel_relaxed(stat1, mapbase + SA1111_INTSTATCLR1); if (stat0 == 0 && stat1 == 0) { do_bad_IRQ(desc); return; } irqdomain = sachip->irqdomain; for (i = 0; stat0; i++, stat0 >>= 1) if (stat0 & 1) generic_handle_domain_irq(irqdomain, i); for (i = 32; stat1; i++, stat1 >>= 1) if (stat1 & 1) generic_handle_domain_irq(irqdomain, i); /* For level-based interrupts */ desc->irq_data.chip->irq_unmask(&desc->irq_data); } static u32 sa1111_irqmask(struct irq_data *d) { return BIT(irqd_to_hwirq(d) & 31); } static int sa1111_irqbank(struct irq_data *d) { return (irqd_to_hwirq(d) / 32) * 4; } static void sa1111_ack_irq(struct irq_data *d) { } static void sa1111_mask_irq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); u32 ie; ie = readl_relaxed(mapbase + SA1111_INTEN0); ie &= ~sa1111_irqmask(d); writel(ie, mapbase + SA1111_INTEN0); } static void sa1111_unmask_irq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); u32 ie; ie = readl_relaxed(mapbase + SA1111_INTEN0); ie |= sa1111_irqmask(d); writel_relaxed(ie, mapbase + SA1111_INTEN0); } /* * Attempt to re-trigger the interrupt. The SA1111 contains a register * (INTSET) which claims to do this. However, in practice no amount of * manipulation of INTEN and INTSET guarantees that the interrupt will * be triggered. In fact, its very difficult, if not impossible to get * INTSET to re-trigger the interrupt. 
*/ static int sa1111_retrigger_irq(struct irq_data *d) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); u32 ip, mask = sa1111_irqmask(d); int i; ip = readl_relaxed(mapbase + SA1111_INTPOL0); for (i = 0; i < 8; i++) { writel_relaxed(ip ^ mask, mapbase + SA1111_INTPOL0); writel_relaxed(ip, mapbase + SA1111_INTPOL0); if (readl_relaxed(mapbase + SA1111_INTSTATCLR0) & mask) break; } if (i == 8) { pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n", d->irq); return 0; } return 1; } static int sa1111_type_irq(struct irq_data *d, unsigned int flags) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); u32 ip, mask = sa1111_irqmask(d); if (flags == IRQ_TYPE_PROBE) return 0; if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0) return -EINVAL; ip = readl_relaxed(mapbase + SA1111_INTPOL0); if (flags & IRQ_TYPE_EDGE_RISING) ip &= ~mask; else ip |= mask; writel_relaxed(ip, mapbase + SA1111_INTPOL0); writel_relaxed(ip, mapbase + SA1111_WAKEPOL0); return 0; } static int sa1111_wake_irq(struct irq_data *d, unsigned int on) { struct sa1111 *sachip = irq_data_get_irq_chip_data(d); void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); u32 we, mask = sa1111_irqmask(d); we = readl_relaxed(mapbase + SA1111_WAKEEN0); if (on) we |= mask; else we &= ~mask; writel_relaxed(we, mapbase + SA1111_WAKEEN0); return 0; } static struct irq_chip sa1111_irq_chip = { .name = "SA1111", .irq_ack = sa1111_ack_irq, .irq_mask = sa1111_mask_irq, .irq_unmask = sa1111_unmask_irq, .irq_retrigger = sa1111_retrigger_irq, .irq_set_type = sa1111_type_irq, .irq_set_wake = sa1111_wake_irq, }; static int sa1111_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct sa1111 *sachip = d->host_data; /* Disallow unavailable interrupts */ if (hwirq > SSPROR && hwirq < AUDXMTDMADONEA) return -EINVAL; irq_set_chip_data(irq, sachip); irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq); irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); return 0; } static const struct irq_domain_ops sa1111_irqdomain_ops = { .map = sa1111_irqdomain_map, .xlate = irq_domain_xlate_twocell, }; static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) { void __iomem *irqbase = sachip->base + SA1111_INTC; int ret; /* * We're guaranteed that this region hasn't been taken. */ request_mem_region(sachip->phys + SA1111_INTC, 512, "irq"); ret = irq_alloc_descs(-1, irq_base, SA1111_IRQ_NR, -1); if (ret <= 0) { dev_err(sachip->dev, "unable to allocate %u irqs: %d\n", SA1111_IRQ_NR, ret); if (ret == 0) ret = -EINVAL; return ret; } sachip->irq_base = ret; /* disable all IRQs */ writel_relaxed(0, irqbase + SA1111_INTEN0); writel_relaxed(0, irqbase + SA1111_INTEN1); writel_relaxed(0, irqbase + SA1111_WAKEEN0); writel_relaxed(0, irqbase + SA1111_WAKEEN1); /* * detect on rising edge. Note: Feb 2001 Errata for SA1111 * specifies that S0ReadyInt and S1ReadyInt should be '1'. 
*/ writel_relaxed(0, irqbase + SA1111_INTPOL0); writel_relaxed(BIT(IRQ_S0_READY_NINT & 31) | BIT(IRQ_S1_READY_NINT & 31), irqbase + SA1111_INTPOL1); /* clear all IRQs */ writel_relaxed(~0, irqbase + SA1111_INTSTATCLR0); writel_relaxed(~0, irqbase + SA1111_INTSTATCLR1); sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR, &sa1111_irqdomain_ops, sachip); if (!sachip->irqdomain) { irq_free_descs(sachip->irq_base, SA1111_IRQ_NR); return -ENOMEM; } irq_domain_associate_many(sachip->irqdomain, sachip->irq_base + IRQ_GPAIN0, IRQ_GPAIN0, SSPROR + 1 - IRQ_GPAIN0); irq_domain_associate_many(sachip->irqdomain, sachip->irq_base + AUDXMTDMADONEA, AUDXMTDMADONEA, IRQ_S1_BVD1_STSCHG + 1 - AUDXMTDMADONEA); /* * Register SA1111 interrupt */ irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING); irq_set_chained_handler_and_data(sachip->irq, sa1111_irq_handler, sachip); dev_info(sachip->dev, "Providing IRQ%u-%u\n", sachip->irq_base, sachip->irq_base + SA1111_IRQ_NR - 1); return 0; } static void sa1111_remove_irq(struct sa1111 *sachip) { struct irq_domain *domain = sachip->irqdomain; void __iomem *irqbase = sachip->base + SA1111_INTC; int i; /* disable all IRQs */ writel_relaxed(0, irqbase + SA1111_INTEN0); writel_relaxed(0, irqbase + SA1111_INTEN1); writel_relaxed(0, irqbase + SA1111_WAKEEN0); writel_relaxed(0, irqbase + SA1111_WAKEEN1); irq_set_chained_handler_and_data(sachip->irq, NULL, NULL); for (i = 0; i < SA1111_IRQ_NR; i++) irq_dispose_mapping(irq_find_mapping(domain, i)); irq_domain_remove(domain); release_mem_region(sachip->phys + SA1111_INTC, 512); } enum { SA1111_GPIO_PXDDR = (SA1111_GPIO_PADDR - SA1111_GPIO_PADDR), SA1111_GPIO_PXDRR = (SA1111_GPIO_PADRR - SA1111_GPIO_PADDR), SA1111_GPIO_PXDWR = (SA1111_GPIO_PADWR - SA1111_GPIO_PADDR), SA1111_GPIO_PXSDR = (SA1111_GPIO_PASDR - SA1111_GPIO_PADDR), SA1111_GPIO_PXSSR = (SA1111_GPIO_PASSR - SA1111_GPIO_PADDR), }; static struct sa1111 *gc_to_sa1111(struct gpio_chip *gc) { return container_of(gc, struct sa1111, gc); } static void __iomem *sa1111_gpio_map_reg(struct sa1111 *sachip, unsigned offset) { void __iomem *reg = sachip->base + SA1111_GPIO; if (offset < 4) return reg + SA1111_GPIO_PADDR; if (offset < 10) return reg + SA1111_GPIO_PBDDR; if (offset < 18) return reg + SA1111_GPIO_PCDDR; return NULL; } static u32 sa1111_gpio_map_bit(unsigned offset) { if (offset < 4) return BIT(offset); if (offset < 10) return BIT(offset - 4); if (offset < 18) return BIT(offset - 10); return 0; } static void sa1111_gpio_modify(void __iomem *reg, u32 mask, u32 set) { u32 val; val = readl_relaxed(reg); val &= ~mask; val |= mask & set; writel_relaxed(val, reg); } static int sa1111_gpio_get_direction(struct gpio_chip *gc, unsigned offset) { struct sa1111 *sachip = gc_to_sa1111(gc); void __iomem *reg = sa1111_gpio_map_reg(sachip, offset); u32 mask = sa1111_gpio_map_bit(offset); return !!(readl_relaxed(reg + SA1111_GPIO_PXDDR) & mask); } static int sa1111_gpio_direction_input(struct gpio_chip *gc, unsigned offset) { struct sa1111 *sachip = gc_to_sa1111(gc); unsigned long flags; void __iomem *reg = sa1111_gpio_map_reg(sachip, offset); u32 mask = sa1111_gpio_map_bit(offset); spin_lock_irqsave(&sachip->lock, flags); sa1111_gpio_modify(reg + SA1111_GPIO_PXDDR, mask, mask); sa1111_gpio_modify(reg + SA1111_GPIO_PXSDR, mask, mask); spin_unlock_irqrestore(&sachip->lock, flags); return 0; } static int sa1111_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int value) { struct sa1111 *sachip = gc_to_sa1111(gc); unsigned long flags; void __iomem *reg = 
sa1111_gpio_map_reg(sachip, offset); u32 mask = sa1111_gpio_map_bit(offset); spin_lock_irqsave(&sachip->lock, flags); sa1111_gpio_modify(reg + SA1111_GPIO_PXDWR, mask, value ? mask : 0); sa1111_gpio_modify(reg + SA1111_GPIO_PXSSR, mask, value ? mask : 0); sa1111_gpio_modify(reg + SA1111_GPIO_PXDDR, mask, 0); sa1111_gpio_modify(reg + SA1111_GPIO_PXSDR, mask, 0); spin_unlock_irqrestore(&sachip->lock, flags); return 0; } static int sa1111_gpio_get(struct gpio_chip *gc, unsigned offset) { struct sa1111 *sachip = gc_to_sa1111(gc); void __iomem *reg = sa1111_gpio_map_reg(sachip, offset); u32 mask = sa1111_gpio_map_bit(offset); return !!(readl_relaxed(reg + SA1111_GPIO_PXDRR) & mask); } static void sa1111_gpio_set(struct gpio_chip *gc, unsigned offset, int value) { struct sa1111 *sachip = gc_to_sa1111(gc); unsigned long flags; void __iomem *reg = sa1111_gpio_map_reg(sachip, offset); u32 mask = sa1111_gpio_map_bit(offset); spin_lock_irqsave(&sachip->lock, flags); sa1111_gpio_modify(reg + SA1111_GPIO_PXDWR, mask, value ? mask : 0); sa1111_gpio_modify(reg + SA1111_GPIO_PXSSR, mask, value ? mask : 0); spin_unlock_irqrestore(&sachip->lock, flags); } static void sa1111_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct sa1111 *sachip = gc_to_sa1111(gc); unsigned long flags; void __iomem *reg = sachip->base + SA1111_GPIO; u32 msk, val; msk = *mask; val = *bits; spin_lock_irqsave(&sachip->lock, flags); sa1111_gpio_modify(reg + SA1111_GPIO_PADWR, msk & 15, val); sa1111_gpio_modify(reg + SA1111_GPIO_PASSR, msk & 15, val); sa1111_gpio_modify(reg + SA1111_GPIO_PBDWR, (msk >> 4) & 255, val >> 4); sa1111_gpio_modify(reg + SA1111_GPIO_PBSSR, (msk >> 4) & 255, val >> 4); sa1111_gpio_modify(reg + SA1111_GPIO_PCDWR, (msk >> 12) & 255, val >> 12); sa1111_gpio_modify(reg + SA1111_GPIO_PCSSR, (msk >> 12) & 255, val >> 12); spin_unlock_irqrestore(&sachip->lock, flags); } static int sa1111_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct sa1111 *sachip = gc_to_sa1111(gc); return sa1111_map_irq(sachip, offset); } static int sa1111_setup_gpios(struct sa1111 *sachip) { sachip->gc.label = "sa1111"; sachip->gc.parent = sachip->dev; sachip->gc.owner = THIS_MODULE; sachip->gc.get_direction = sa1111_gpio_get_direction; sachip->gc.direction_input = sa1111_gpio_direction_input; sachip->gc.direction_output = sa1111_gpio_direction_output; sachip->gc.get = sa1111_gpio_get; sachip->gc.set = sa1111_gpio_set; sachip->gc.set_multiple = sa1111_gpio_set_multiple; sachip->gc.to_irq = sa1111_gpio_to_irq; sachip->gc.base = -1; sachip->gc.ngpio = 18; return devm_gpiochip_add_data(sachip->dev, &sachip->gc, sachip); } /* * Bring the SA1111 out of reset. This requires a set procedure: * 1. nRESET asserted (by hardware) * 2. CLK turned on from SA1110 * 3. nRESET deasserted * 4. VCO turned on, PLL_BYPASS turned off * 5. Wait lock time, then assert RCLKEn * 7. PCR set to allow clocking of individual functions * * Until we've done this, the only registers we can access are: * SBI_SKCR * SBI_SMCR * SBI_SKID */ static void sa1111_wake(struct sa1111 *sachip) { unsigned long flags, r; spin_lock_irqsave(&sachip->lock, flags); clk_enable(sachip->clk); /* * Turn VCO on, and disable PLL Bypass. */ r = readl_relaxed(sachip->base + SA1111_SKCR); r &= ~SKCR_VCO_OFF; writel_relaxed(r, sachip->base + SA1111_SKCR); r |= SKCR_PLL_BYPASS | SKCR_OE_EN; writel_relaxed(r, sachip->base + SA1111_SKCR); /* * Wait lock time. SA1111 manual _doesn't_ * specify a figure for this! We choose 100us. 
*/ udelay(100); /* * Enable RCLK. We also ensure that RDYEN is set. */ r |= SKCR_RCLKEN | SKCR_RDYEN; writel_relaxed(r, sachip->base + SA1111_SKCR); /* * Wait 14 RCLK cycles for the chip to finish coming out * of reset. (RCLK=24MHz). This is 590ns. */ udelay(1); /* * Ensure all clocks are initially off. */ writel_relaxed(0, sachip->base + SA1111_SKPCR); spin_unlock_irqrestore(&sachip->lock, flags); } #ifdef CONFIG_ARCH_SA1100 static u32 sa1111_dma_mask[] = { ~0, ~(1 << 20), ~(1 << 23), ~(1 << 24), ~(1 << 25), ~(1 << 20), ~(1 << 20), 0, }; /* * Configure the SA1111 shared memory controller. */ static void sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac, unsigned int cas_latency) { unsigned int smcr = SMCR_DTIM | SMCR_MBGE | FInsrt(drac, SMCR_DRAC); if (cas_latency == 3) smcr |= SMCR_CLAT; writel_relaxed(smcr, sachip->base + SA1111_SMCR); /* * Now clear the bits in the DMA mask to work around the SA1111 * DMA erratum (Intel StrongARM SA-1111 Microprocessor Companion * Chip Specification Update, June 2000, Erratum #7). */ if (sachip->dev->dma_mask) *sachip->dev->dma_mask &= sa1111_dma_mask[drac >> 2]; sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; } #endif static void sa1111_dev_release(struct device *_dev) { struct sa1111_dev *dev = to_sa1111_device(_dev); kfree(dev); } static int sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, struct sa1111_dev_info *info) { struct sa1111_dev *dev; unsigned i; int ret; dev = kzalloc(sizeof(struct sa1111_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto err_alloc; } device_initialize(&dev->dev); dev_set_name(&dev->dev, "%4.4lx", info->offset); dev->devid = info->devid; dev->dev.parent = sachip->dev; dev->dev.bus = &sa1111_bus_type; dev->dev.release = sa1111_dev_release; dev->res.start = sachip->phys + info->offset; dev->res.end = dev->res.start + 511; dev->res.name = dev_name(&dev->dev); dev->res.flags = IORESOURCE_MEM; dev->mapbase = sachip->base + info->offset; dev->skpcr_mask = info->skpcr_mask; for (i = 0; i < ARRAY_SIZE(info->hwirq); i++) dev->hwirq[i] = info->hwirq[i]; /* * If the parent device has a DMA mask associated with it, and * this child supports DMA, propagate it down to the children. */ if (info->dma && sachip->dev->dma_mask) { dev->dma_mask = *sachip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; dev->dev.coherent_dma_mask = sachip->dev->coherent_dma_mask; } ret = request_resource(parent, &dev->res); if (ret) { dev_err(sachip->dev, "failed to allocate resource for %s\n", dev->res.name); goto err_resource; } ret = device_add(&dev->dev); if (ret) goto err_add; return 0; err_add: release_resource(&dev->res); err_resource: put_device(&dev->dev); err_alloc: return ret; } /** * sa1111_probe - probe for a single SA1111 chip. * @phys_addr: physical address of device. * * Probe for a SA1111 chip. This must be called * before any other SA1111-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. * %-EINVAL no platform data passed * %0 successful. 
*/ static int __sa1111_probe(struct device *me, struct resource *mem, int irq) { struct sa1111_platform_data *pd = me->platform_data; struct sa1111 *sachip; unsigned long id; unsigned int has_devs; int i, ret = -ENODEV; if (!pd) return -EINVAL; sachip = devm_kzalloc(me, sizeof(struct sa1111), GFP_KERNEL); if (!sachip) return -ENOMEM; sachip->clk = devm_clk_get(me, "SA1111_CLK"); if (IS_ERR(sachip->clk)) return PTR_ERR(sachip->clk); ret = clk_prepare(sachip->clk); if (ret) return ret; spin_lock_init(&sachip->lock); sachip->dev = me; dev_set_drvdata(sachip->dev, sachip); sachip->pdata = pd; sachip->phys = mem->start; sachip->irq = irq; /* * Map the whole region. This also maps the * registers for our children. */ sachip->base = ioremap(mem->start, PAGE_SIZE * 2); if (!sachip->base) { ret = -ENOMEM; goto err_clk_unprep; } /* * Probe for the chip. Only touch the SBI registers. */ id = readl_relaxed(sachip->base + SA1111_SKID); if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id); ret = -ENODEV; goto err_unmap; } pr_info("SA1111 Microprocessor Companion Chip: silicon revision %lx, metal revision %lx\n", (id & SKID_SIREV_MASK) >> 4, id & SKID_MTREV_MASK); /* * We found it. Wake the chip up, and initialise. */ sa1111_wake(sachip); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. */ ret = sa1111_setup_irq(sachip, pd->irq_base); if (ret) goto err_clk; /* Setup the GPIOs - should really be done after the IRQ setup */ ret = sa1111_setup_gpios(sachip); if (ret) goto err_irq; #ifdef CONFIG_ARCH_SA1100 { unsigned int val; /* * The SDRAM configuration of the SA1110 and the SA1111 must * match. This is very important to ensure that SA1111 accesses * don't corrupt the SDRAM. Note that this ungates the SA1111's * MBGNT signal, so we must have called sa1110_mb_disable() * beforehand. */ sa1111_configure_smc(sachip, 1, FExtr(MDCNFG, MDCNFG_SA1110_DRAC0), FExtr(MDCNFG, MDCNFG_SA1110_TDL0)); /* * We only need to turn on DCLK whenever we want to use the * DMA. It can otherwise be held firmly in the off position. * (currently, we always enable it.) */ val = readl_relaxed(sachip->base + SA1111_SKPCR); writel_relaxed(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR); /* * Enable the SA1110 memory bus request and grant signals. 
*/ sa1110_mb_enable(); } #endif g_sa1111 = sachip; has_devs = ~0; if (pd) has_devs &= ~pd->disable_devs; for (i = 0; i < ARRAY_SIZE(sa1111_devices); i++) if (sa1111_devices[i].devid & has_devs) sa1111_init_one_child(sachip, mem, &sa1111_devices[i]); return 0; err_irq: sa1111_remove_irq(sachip); err_clk: clk_disable(sachip->clk); err_unmap: iounmap(sachip->base); err_clk_unprep: clk_unprepare(sachip->clk); return ret; } static int sa1111_remove_one(struct device *dev, void *data) { struct sa1111_dev *sadev = to_sa1111_device(dev); if (dev->bus != &sa1111_bus_type) return 0; device_del(&sadev->dev); release_resource(&sadev->res); put_device(&sadev->dev); return 0; } static void __sa1111_remove(struct sa1111 *sachip) { device_for_each_child(sachip->dev, NULL, sa1111_remove_one); sa1111_remove_irq(sachip); clk_disable(sachip->clk); clk_unprepare(sachip->clk); iounmap(sachip->base); } struct sa1111_save_data { unsigned int skcr; unsigned int skpcr; unsigned int skcdr; unsigned char skaud; unsigned char skpwm0; unsigned char skpwm1; /* * Interrupt controller */ unsigned int intpol0; unsigned int intpol1; unsigned int inten0; unsigned int inten1; unsigned int wakepol0; unsigned int wakepol1; unsigned int wakeen0; unsigned int wakeen1; }; #ifdef CONFIG_PM static int sa1111_suspend_noirq(struct device *dev) { struct sa1111 *sachip = dev_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags; unsigned int val; void __iomem *base; save = kmalloc(sizeof(struct sa1111_save_data), GFP_KERNEL); if (!save) return -ENOMEM; sachip->saved_state = save; spin_lock_irqsave(&sachip->lock, flags); /* * Save state. */ base = sachip->base; save->skcr = readl_relaxed(base + SA1111_SKCR); save->skpcr = readl_relaxed(base + SA1111_SKPCR); save->skcdr = readl_relaxed(base + SA1111_SKCDR); save->skaud = readl_relaxed(base + SA1111_SKAUD); save->skpwm0 = readl_relaxed(base + SA1111_SKPWM0); save->skpwm1 = readl_relaxed(base + SA1111_SKPWM1); writel_relaxed(0, sachip->base + SA1111_SKPWM0); writel_relaxed(0, sachip->base + SA1111_SKPWM1); base = sachip->base + SA1111_INTC; save->intpol0 = readl_relaxed(base + SA1111_INTPOL0); save->intpol1 = readl_relaxed(base + SA1111_INTPOL1); save->inten0 = readl_relaxed(base + SA1111_INTEN0); save->inten1 = readl_relaxed(base + SA1111_INTEN1); save->wakepol0 = readl_relaxed(base + SA1111_WAKEPOL0); save->wakepol1 = readl_relaxed(base + SA1111_WAKEPOL1); save->wakeen0 = readl_relaxed(base + SA1111_WAKEEN0); save->wakeen1 = readl_relaxed(base + SA1111_WAKEEN1); /* * Disable. */ val = readl_relaxed(sachip->base + SA1111_SKCR); writel_relaxed(val | SKCR_SLEEP, sachip->base + SA1111_SKCR); clk_disable(sachip->clk); spin_unlock_irqrestore(&sachip->lock, flags); #ifdef CONFIG_ARCH_SA1100 sa1110_mb_disable(); #endif return 0; } /* * sa1111_resume - Restore the SA1111 device state. * @dev: device to restore * * Restore the general state of the SA1111; clock control and * interrupt controller. Other parts of the SA1111 must be * restored by their respective drivers, and must be called * via LDM after this function. */ static int sa1111_resume_noirq(struct device *dev) { struct sa1111 *sachip = dev_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags, id; void __iomem *base; save = sachip->saved_state; if (!save) return 0; /* * Ensure that the SA1111 is still here. * FIXME: shouldn't do this here. 
*/ id = readl_relaxed(sachip->base + SA1111_SKID); if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { __sa1111_remove(sachip); dev_set_drvdata(dev, NULL); kfree(save); return 0; } /* * First of all, wake up the chip. */ sa1111_wake(sachip); #ifdef CONFIG_ARCH_SA1100 /* Enable the memory bus request/grant signals */ sa1110_mb_enable(); #endif /* * Only lock for write ops. Also, sa1111_wake must be called with * released spinlock! */ spin_lock_irqsave(&sachip->lock, flags); writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN0); writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN1); base = sachip->base; writel_relaxed(save->skcr, base + SA1111_SKCR); writel_relaxed(save->skpcr, base + SA1111_SKPCR); writel_relaxed(save->skcdr, base + SA1111_SKCDR); writel_relaxed(save->skaud, base + SA1111_SKAUD); writel_relaxed(save->skpwm0, base + SA1111_SKPWM0); writel_relaxed(save->skpwm1, base + SA1111_SKPWM1); base = sachip->base + SA1111_INTC; writel_relaxed(save->intpol0, base + SA1111_INTPOL0); writel_relaxed(save->intpol1, base + SA1111_INTPOL1); writel_relaxed(save->inten0, base + SA1111_INTEN0); writel_relaxed(save->inten1, base + SA1111_INTEN1); writel_relaxed(save->wakepol0, base + SA1111_WAKEPOL0); writel_relaxed(save->wakepol1, base + SA1111_WAKEPOL1); writel_relaxed(save->wakeen0, base + SA1111_WAKEEN0); writel_relaxed(save->wakeen1, base + SA1111_WAKEEN1); spin_unlock_irqrestore(&sachip->lock, flags); sachip->saved_state = NULL; kfree(save); return 0; } #else #define sa1111_suspend_noirq NULL #define sa1111_resume_noirq NULL #endif static int sa1111_probe(struct platform_device *pdev) { struct resource *mem; int irq; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; return __sa1111_probe(&pdev->dev, mem, irq); } static void sa1111_remove(struct platform_device *pdev) { struct sa1111 *sachip = platform_get_drvdata(pdev); if (sachip) { #ifdef CONFIG_PM kfree(sachip->saved_state); sachip->saved_state = NULL; #endif __sa1111_remove(sachip); platform_set_drvdata(pdev, NULL); } } static struct dev_pm_ops sa1111_pm_ops = { .suspend_noirq = sa1111_suspend_noirq, .resume_noirq = sa1111_resume_noirq, }; /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. * * We also need to handle the SDRAM configuration for * PXA250/SA1110 machine classes. */ static struct platform_driver sa1111_device_driver = { .probe = sa1111_probe, .remove_new = sa1111_remove, .driver = { .name = "sa1111", .pm = &sa1111_pm_ops, }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct sa1111 *sa1111_chip_driver(struct sa1111_dev *sadev) { return (struct sa1111 *)dev_get_drvdata(sadev->dev.parent); } /* * The bits in the opdiv field are non-linear. */ static unsigned char opdiv_table[] = { 1, 4, 2, 8 }; static unsigned int __sa1111_pll_clock(struct sa1111 *sachip) { unsigned int skcdr, fbdiv, ipdiv, opdiv; skcdr = readl_relaxed(sachip->base + SA1111_SKCDR); fbdiv = (skcdr & 0x007f) + 2; ipdiv = ((skcdr & 0x0f80) >> 7) + 2; opdiv = opdiv_table[(skcdr & 0x3000) >> 12]; return 3686400 * fbdiv / (ipdiv * opdiv); } /** * sa1111_pll_clock - return the current PLL clock frequency. * @sadev: SA1111 function block * * BUG: we should look at SKCR. We also blindly believe that * the chip is being fed with the 3.6864MHz clock. 
* * Returns the PLL clock in Hz. */ unsigned int sa1111_pll_clock(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); return __sa1111_pll_clock(sachip); } EXPORT_SYMBOL(sa1111_pll_clock); /** * sa1111_select_audio_mode - select I2S or AC link mode * @sadev: SA1111 function block * @mode: One of %SA1111_AUDIO_ACLINK or %SA1111_AUDIO_I2S * * Frob the SKCR to select AC Link mode or I2S mode for * the audio block. */ void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; spin_lock_irqsave(&sachip->lock, flags); val = readl_relaxed(sachip->base + SA1111_SKCR); if (mode == SA1111_AUDIO_I2S) { val &= ~SKCR_SELAC; } else { val |= SKCR_SELAC; } writel_relaxed(val, sachip->base + SA1111_SKCR); spin_unlock_irqrestore(&sachip->lock, flags); } EXPORT_SYMBOL(sa1111_select_audio_mode); /** * sa1111_set_audio_rate - set the audio sample rate * @sadev: SA1111 SAC function block * @rate: sample rate to select */ int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned int div; if (sadev->devid != SA1111_DEVID_SAC) return -EINVAL; div = (__sa1111_pll_clock(sachip) / 256 + rate / 2) / rate; if (div == 0) div = 1; if (div > 128) div = 128; writel_relaxed(div - 1, sachip->base + SA1111_SKAUD); return 0; } EXPORT_SYMBOL(sa1111_set_audio_rate); /** * sa1111_get_audio_rate - get the audio sample rate * @sadev: SA1111 SAC function block device */ int sa1111_get_audio_rate(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long div; if (sadev->devid != SA1111_DEVID_SAC) return -EINVAL; div = readl_relaxed(sachip->base + SA1111_SKAUD) + 1; return __sa1111_pll_clock(sachip) / (256 * div); } EXPORT_SYMBOL(sa1111_get_audio_rate); /* * Individual device operations. */ /** * sa1111_enable_device - enable an on-chip SA1111 function block * @sadev: SA1111 function block device to enable */ int sa1111_enable_device(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; int ret = 0; if (sachip->pdata && sachip->pdata->enable) ret = sachip->pdata->enable(sachip->pdata->data, sadev->devid); if (ret == 0) { spin_lock_irqsave(&sachip->lock, flags); val = readl_relaxed(sachip->base + SA1111_SKPCR); writel_relaxed(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR); spin_unlock_irqrestore(&sachip->lock, flags); } return ret; } EXPORT_SYMBOL(sa1111_enable_device); /** * sa1111_disable_device - disable an on-chip SA1111 function block * @sadev: SA1111 function block device to disable */ void sa1111_disable_device(struct sa1111_dev *sadev) { struct sa1111 *sachip = sa1111_chip_driver(sadev); unsigned long flags; unsigned int val; spin_lock_irqsave(&sachip->lock, flags); val = readl_relaxed(sachip->base + SA1111_SKPCR); writel_relaxed(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR); spin_unlock_irqrestore(&sachip->lock, flags); if (sachip->pdata && sachip->pdata->disable) sachip->pdata->disable(sachip->pdata->data, sadev->devid); } EXPORT_SYMBOL(sa1111_disable_device); int sa1111_get_irq(struct sa1111_dev *sadev, unsigned num) { struct sa1111 *sachip = sa1111_chip_driver(sadev); if (num >= ARRAY_SIZE(sadev->hwirq)) return -EINVAL; return sa1111_map_irq(sachip, sadev->hwirq[num]); } EXPORT_SYMBOL_GPL(sa1111_get_irq); /* * SA1111 "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. 
*/ static int sa1111_match(struct device *_dev, struct device_driver *_drv) { struct sa1111_dev *dev = to_sa1111_device(_dev); struct sa1111_driver *drv = SA1111_DRV(_drv); return !!(dev->devid & drv->devid); } static int sa1111_bus_probe(struct device *dev) { struct sa1111_dev *sadev = to_sa1111_device(dev); struct sa1111_driver *drv = SA1111_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(sadev); return ret; } static void sa1111_bus_remove(struct device *dev) { struct sa1111_dev *sadev = to_sa1111_device(dev); struct sa1111_driver *drv = SA1111_DRV(dev->driver); if (drv->remove) drv->remove(sadev); } struct bus_type sa1111_bus_type = { .name = "sa1111-rab", .match = sa1111_match, .probe = sa1111_bus_probe, .remove = sa1111_bus_remove, }; EXPORT_SYMBOL(sa1111_bus_type); int sa1111_driver_register(struct sa1111_driver *driver) { driver->drv.bus = &sa1111_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(sa1111_driver_register); void sa1111_driver_unregister(struct sa1111_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(sa1111_driver_unregister); static int __init sa1111_init(void) { int ret = bus_register(&sa1111_bus_type); if (ret == 0) platform_driver_register(&sa1111_device_driver); return ret; } static void __exit sa1111_exit(void) { platform_driver_unregister(&sa1111_device_driver); bus_unregister(&sa1111_bus_type); } subsys_initcall(sa1111_init); module_exit(sa1111_exit); MODULE_DESCRIPTION("Intel Corporation SA1111 core driver"); MODULE_LICENSE("GPL");
linux-master
arch/arm/common/sa1111.c
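/*
 * Illustrative user-space sketch (not part of the kernel file above) of
 * the SKCDR arithmetic used by __sa1111_pll_clock() and the divisor
 * rounding in sa1111_set_audio_rate(). The field layout and the
 * 3686400 Hz reference clock follow the driver code; the sample SKCDR
 * value below is an invented example, not a dump from real hardware.
 */
#include <stdio.h>

static const unsigned char opdiv_table[] = { 1, 4, 2, 8 };

static unsigned int pll_clock(unsigned int skcdr)
{
	unsigned int fbdiv = (skcdr & 0x007f) + 2;
	unsigned int ipdiv = ((skcdr & 0x0f80) >> 7) + 2;
	unsigned int opdiv = opdiv_table[(skcdr & 0x3000) >> 12];

	return 3686400 * fbdiv / (ipdiv * opdiv);
}

/* Same round-to-nearest and 1..128 clamping as sa1111_set_audio_rate(). */
static unsigned int audio_divisor(unsigned int pll, unsigned int rate)
{
	unsigned int div = (pll / 256 + rate / 2) / rate;

	if (div == 0)
		div = 1;
	if (div > 128)
		div = 128;
	return div;
}

int main(void)
{
	unsigned int skcdr = 0x1047;	/* hypothetical register value */
	unsigned int pll = pll_clock(skcdr);

	printf("PLL: %u Hz\n", pll);
	printf("SKAUD divisor for 44100 Hz: %u\n", audio_divisor(pll, 44100));
	return 0;
}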
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hardware parameter area specific to Sharp SL series devices
 *
 * Copyright (c) 2005 Richard Purdie
 *
 * Based on Sharp's 2.4 kernel patches
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mach/sharpsl_param.h>
#include <asm/page.h>

/*
 * Certain hardware parameters determined at the time of device manufacture,
 * typically including LCD parameters, are loaded by the bootloader at the
 * address PARAM_BASE. As the kernel will overwrite them, we need to store
 * them early in the boot process, then pass them to the appropriate drivers.
 * Not all devices use all parameters but the format is common to all.
 */
#ifdef CONFIG_ARCH_SA1100
#define PARAM_BASE	0xe8ffc000
#define param_start(x)	(void *)(x)
#else
#define PARAM_BASE	0xa0000a00
#define param_start(x)	__va(x)
#endif

#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )

#define COMADJ_MAGIC	MAGIC_CHG('C','M','A','D')
#define UUID_MAGIC	MAGIC_CHG('U','U','I','D')
#define TOUCH_MAGIC	MAGIC_CHG('T','U','C','H')
#define AD_MAGIC	MAGIC_CHG('B','V','A','D')
#define PHAD_MAGIC	MAGIC_CHG('P','H','A','D')

struct sharpsl_param_info sharpsl_param;
EXPORT_SYMBOL(sharpsl_param);

void sharpsl_save_param(void)
{
	struct sharpsl_param_info *params = param_start(PARAM_BASE);

	memcpy(&sharpsl_param, params, sizeof(*params));

	if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
		sharpsl_param.comadj = -1;

	if (sharpsl_param.phad_keyword != PHAD_MAGIC)
		sharpsl_param.phadadj = -1;

	if (sharpsl_param.uuid_keyword != UUID_MAGIC)
		sharpsl_param.uuid[0] = -1;

	if (sharpsl_param.touch_keyword != TOUCH_MAGIC)
		sharpsl_param.touch_xp = -1;

	if (sharpsl_param.adadj_keyword != AD_MAGIC)
		sharpsl_param.adadj = -1;
}
linux-master
arch/arm/common/sharpsl_param.c
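/*
 * Illustrative check (not from the kernel tree) of how MAGIC_CHG() in
 * sharpsl_param.c packs four characters into a little-endian word, so
 * that e.g. COMADJ_MAGIC matches the ASCII bytes "CMAD" laid down by
 * the bootloader.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAGIC_CHG(a, b, c, d) (((d) << 24) | ((c) << 16) | ((b) << 8) | (a))

int main(void)
{
	uint32_t magic = MAGIC_CHG('C', 'M', 'A', 'D');
	char bytes[5] = { 0 };

	/*
	 * On a little-endian CPU (as on these ARM devices) the low byte
	 * comes first in memory, so the word reads back as "CMAD".
	 */
	memcpy(bytes, &magic, 4);
	printf("0x%08x -> \"%s\"\n", magic, bytes);
	return 0;
}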
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-vexpress/mcpm_platsmp.c * * Created by: Nicolas Pitre, November 2012 * Copyright: (C) 2012-2013 Linaro Limited * * Code to handle secondary CPU bringup and hotplug for the cluster power API. */ #include <linux/init.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <asm/mcpm.h> #include <asm/smp.h> #include <asm/smp_plat.h> static void cpu_to_pcpu(unsigned int cpu, unsigned int *pcpu, unsigned int *pcluster) { unsigned int mpidr; mpidr = cpu_logical_map(cpu); *pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); *pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); } static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned int pcpu, pcluster, ret; extern void secondary_startup(void); cpu_to_pcpu(cpu, &pcpu, &pcluster); pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n", __func__, cpu, pcpu, pcluster); mcpm_set_entry_vector(pcpu, pcluster, NULL); ret = mcpm_cpu_power_up(pcpu, pcluster); if (ret) return ret; mcpm_set_entry_vector(pcpu, pcluster, secondary_startup); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); dsb_sev(); return 0; } static void mcpm_secondary_init(unsigned int cpu) { mcpm_cpu_powered_up(); } #ifdef CONFIG_HOTPLUG_CPU static int mcpm_cpu_kill(unsigned int cpu) { unsigned int pcpu, pcluster; cpu_to_pcpu(cpu, &pcpu, &pcluster); return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster); } static bool mcpm_cpu_can_disable(unsigned int cpu) { /* We assume all CPUs may be shut down. */ return true; } static void mcpm_cpu_die(unsigned int cpu) { unsigned int mpidr, pcpu, pcluster; mpidr = read_cpuid_mpidr(); pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); mcpm_set_entry_vector(pcpu, pcluster, NULL); mcpm_cpu_power_down(); } #endif static const struct smp_operations mcpm_smp_ops __initconst = { .smp_boot_secondary = mcpm_boot_secondary, .smp_secondary_init = mcpm_secondary_init, #ifdef CONFIG_HOTPLUG_CPU .cpu_kill = mcpm_cpu_kill, .cpu_can_disable = mcpm_cpu_can_disable, .cpu_die = mcpm_cpu_die, #endif }; void __init mcpm_smp_set_ops(void) { smp_set_ops(&mcpm_smp_ops); }
linux-master
arch/arm/common/mcpm_platsmp.c
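/*
 * User-space sketch of the MPIDR decoding performed by cpu_to_pcpu()
 * above. The 8-bits-per-level layout mirrors MPIDR_AFFINITY_LEVEL()
 * from arch/arm/include/asm/cputype.h; the sample MPIDR value is
 * hypothetical.
 */
#include <stdio.h>

#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)

int main(void)
{
	unsigned int mpidr = 0x00000102;	/* invented: cluster 1, cpu 2 */
	unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	printf("MPIDR %#x -> cpu %u, cluster %u\n", mpidr, pcpu, pcluster);
	return 0;
}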
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM * * Created by: Nicolas Pitre, March 2012 * Copyright: (C) 2012-2013 Linaro Limited */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/irqflags.h> #include <linux/cpu_pm.h> #include <asm/mcpm.h> #include <asm/cacheflush.h> #include <asm/idmap.h> #include <asm/cputype.h> #include <asm/suspend.h> /* * The public API for this code is documented in arch/arm/include/asm/mcpm.h. * For a comprehensive description of the main algorithm used here, please * see Documentation/arch/arm/cluster-pm-race-avoidance.rst. */ struct sync_struct mcpm_sync; /* * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. * This must be called at the point of committing to teardown of a CPU. * The CPU cache (SCTRL.C bit) is expected to still be active. */ static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) { mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); } /* * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the * cluster can be torn down without disrupting this CPU. * To avoid deadlocks, this must be called before a CPU is powered down. * The CPU cache (SCTRL.C bit) is expected to be off. * However L2 cache might or might not be active. */ static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) { dmb(); mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); sev(); } /* * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. * @state: the final state of the cluster: * CLUSTER_UP: no destructive teardown was done and the cluster has been * restored to the previous state (CPU cache still active); or * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off * (CPU cache disabled, L2 cache either enabled or disabled). */ static void __mcpm_outbound_leave_critical(unsigned int cluster, int state) { dmb(); mcpm_sync.clusters[cluster].cluster = state; sync_cache_w(&mcpm_sync.clusters[cluster].cluster); sev(); } /* * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. * This function should be called by the last man, after local CPU teardown * is complete. CPU cache expected to be active. * * Returns: * false: the critical section was not entered because an inbound CPU was * observed, or the cluster is already being set up; * true: the critical section was entered: it is now safe to tear down the * cluster. */ static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) { unsigned int i; struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; /* Warn inbound CPUs that the cluster is being torn down: */ c->cluster = CLUSTER_GOING_DOWN; sync_cache_w(&c->cluster); /* Back out if the inbound cluster is already in the critical region: */ sync_cache_r(&c->inbound); if (c->inbound == INBOUND_COMING_UP) goto abort; /* * Wait for all CPUs to get out of the GOING_DOWN state, so that local * teardown is complete on each CPU before tearing down the cluster. * * If any CPU has been woken up again from the DOWN state, then we * shouldn't be taking the cluster down at all: abort in that case. 
*/ sync_cache_r(&c->cpus); for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { int cpustate; if (i == cpu) continue; while (1) { cpustate = c->cpus[i].cpu; if (cpustate != CPU_GOING_DOWN) break; wfe(); sync_cache_r(&c->cpus[i].cpu); } switch (cpustate) { case CPU_DOWN: continue; default: goto abort; } } return true; abort: __mcpm_outbound_leave_critical(cluster, CLUSTER_UP); return false; } static int __mcpm_cluster_state(unsigned int cluster) { sync_cache_r(&mcpm_sync.clusters[cluster].cluster); return mcpm_sync.clusters[cluster].cluster; } extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) { unsigned long val = ptr ? __pa_symbol(ptr) : 0; mcpm_entry_vectors[cluster][cpu] = val; sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); } extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2]; void mcpm_set_early_poke(unsigned cpu, unsigned cluster, unsigned long poke_phys_addr, unsigned long poke_val) { unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0]; poke[0] = poke_phys_addr; poke[1] = poke_val; __sync_cache_range_w(poke, 2 * sizeof(*poke)); } static const struct mcpm_platform_ops *platform_ops; int __init mcpm_platform_register(const struct mcpm_platform_ops *ops) { if (platform_ops) return -EBUSY; platform_ops = ops; return 0; } bool mcpm_is_available(void) { return (platform_ops) ? true : false; } EXPORT_SYMBOL_GPL(mcpm_is_available); /* * We can't use regular spinlocks. In the switcher case, it is possible * for an outbound CPU to call power_down() after its inbound counterpart * is already live using the same logical CPU number which trips lockdep * debugging. */ static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; static inline bool mcpm_cluster_unused(unsigned int cluster) { int i, cnt; for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++) cnt |= mcpm_cpu_use_count[cluster][i]; return !cnt; } int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) { bool cpu_is_down, cluster_is_down; int ret = 0; pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (!platform_ops) return -EUNATCH; /* try not to shadow power_up errors */ might_sleep(); /* * Since this is called with IRQs enabled, and no arch_spin_lock_irq * variant exists, we need to disable IRQs manually here. */ local_irq_disable(); arch_spin_lock(&mcpm_lock); cpu_is_down = !mcpm_cpu_use_count[cluster][cpu]; cluster_is_down = mcpm_cluster_unused(cluster); mcpm_cpu_use_count[cluster][cpu]++; /* * The only possible values are: * 0 = CPU down * 1 = CPU (still) up * 2 = CPU requested to be up before it had a chance * to actually make itself down. * Any other value is a bug. 
*/ BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 && mcpm_cpu_use_count[cluster][cpu] != 2); if (cluster_is_down) ret = platform_ops->cluster_powerup(cluster); if (cpu_is_down && !ret) ret = platform_ops->cpu_powerup(cpu, cluster); arch_spin_unlock(&mcpm_lock); local_irq_enable(); return ret; } typedef typeof(cpu_reset) phys_reset_t; void mcpm_cpu_power_down(void) { unsigned int mpidr, cpu, cluster; bool cpu_going_down, last_man; phys_reset_t phys_reset; mpidr = read_cpuid_mpidr(); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (WARN_ON_ONCE(!platform_ops)) return; BUG_ON(!irqs_disabled()); setup_mm_for_reboot(); __mcpm_cpu_going_down(cpu, cluster); arch_spin_lock(&mcpm_lock); BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); mcpm_cpu_use_count[cluster][cpu]--; BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 && mcpm_cpu_use_count[cluster][cpu] != 1); cpu_going_down = !mcpm_cpu_use_count[cluster][cpu]; last_man = mcpm_cluster_unused(cluster); if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { platform_ops->cpu_powerdown_prepare(cpu, cluster); platform_ops->cluster_powerdown_prepare(cluster); arch_spin_unlock(&mcpm_lock); platform_ops->cluster_cache_disable(); __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); } else { if (cpu_going_down) platform_ops->cpu_powerdown_prepare(cpu, cluster); arch_spin_unlock(&mcpm_lock); /* * If cpu_going_down is false here, that means a power_up * request raced ahead of us. Even if we do not want to * shut this CPU down, the caller still expects execution * to return through the system resume entry path, like * when the WFI is aborted due to a new IRQ or the like.. * So let's continue with cache cleaning in all cases. */ platform_ops->cpu_cache_disable(); } __mcpm_cpu_down(cpu, cluster); /* Now we are prepared for power-down, do it: */ if (cpu_going_down) wfi(); /* * It is possible for a power_up request to happen concurrently * with a power_down request for the same CPU. In this case the * CPU might not be able to actually enter a powered down state * with the WFI instruction if the power_up request has removed * the required reset condition. We must perform a re-entry in * the kernel as if the power_up method just had deasserted reset * on the CPU. */ phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); phys_reset(__pa_symbol(mcpm_entry_point), false); /* should never get here */ BUG(); } int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster) { int ret; if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown)) return -EUNATCH; ret = platform_ops->wait_for_powerdown(cpu, cluster); if (ret) pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n", __func__, cpu, cluster, ret); return ret; } void mcpm_cpu_suspend(void) { if (WARN_ON_ONCE(!platform_ops)) return; /* Some platforms might have to enable special resume modes, etc. 
*/ if (platform_ops->cpu_suspend_prepare) { unsigned int mpidr = read_cpuid_mpidr(); unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); arch_spin_lock(&mcpm_lock); platform_ops->cpu_suspend_prepare(cpu, cluster); arch_spin_unlock(&mcpm_lock); } mcpm_cpu_power_down(); } int mcpm_cpu_powered_up(void) { unsigned int mpidr, cpu, cluster; bool cpu_was_down, first_man; unsigned long flags; if (!platform_ops) return -EUNATCH; mpidr = read_cpuid_mpidr(); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); local_irq_save(flags); arch_spin_lock(&mcpm_lock); cpu_was_down = !mcpm_cpu_use_count[cluster][cpu]; first_man = mcpm_cluster_unused(cluster); if (first_man && platform_ops->cluster_is_up) platform_ops->cluster_is_up(cluster); if (cpu_was_down) mcpm_cpu_use_count[cluster][cpu] = 1; if (platform_ops->cpu_is_up) platform_ops->cpu_is_up(cpu, cluster); arch_spin_unlock(&mcpm_lock); local_irq_restore(flags); return 0; } #ifdef CONFIG_ARM_CPU_SUSPEND static int __init nocache_trampoline(unsigned long _arg) { void (*cache_disable)(void) = (void *)_arg; unsigned int mpidr = read_cpuid_mpidr(); unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); phys_reset_t phys_reset; mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp); setup_mm_for_reboot(); __mcpm_cpu_going_down(cpu, cluster); BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster)); cache_disable(); __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); __mcpm_cpu_down(cpu, cluster); phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); phys_reset(__pa_symbol(mcpm_entry_point), false); BUG(); } int __init mcpm_loopback(void (*cache_disable)(void)) { int ret; /* * We're going to soft-restart the current CPU through the * low-level MCPM code by leveraging the suspend/resume * infrastructure. Let's play it safe by using cpu_pm_enter() * in case the CPU init code path resets the VFP or similar. */ local_irq_disable(); local_fiq_disable(); ret = cpu_pm_enter(); if (!ret) { ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline); cpu_pm_exit(); } local_fiq_enable(); local_irq_enable(); if (ret) pr_err("%s returned %d\n", __func__, ret); return ret; } #endif extern unsigned long mcpm_power_up_setup_phys; int __init mcpm_sync_init( void (*power_up_setup)(unsigned int affinity_level)) { unsigned int i, j, mpidr, this_cluster; BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync); BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1)); /* * Set initial CPU and cluster states. * Only one cluster is assumed to be active at this point. */ for (i = 0; i < MAX_NR_CLUSTERS; i++) { mcpm_sync.clusters[i].cluster = CLUSTER_DOWN; mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP; for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++) mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN; } mpidr = read_cpuid_mpidr(); this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); for_each_online_cpu(i) { mcpm_cpu_use_count[this_cluster][i] = 1; mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP; } mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP; sync_cache_w(&mcpm_sync); if (power_up_setup) { mcpm_power_up_setup_phys = __pa_symbol(power_up_setup); sync_cache_w(&mcpm_power_up_setup_phys); } return 0; }
linux-master
arch/arm/common/mcpm_entry.c
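/*
 * Minimal single-threaded model (illustration only, no locking) of the
 * mcpm_cpu_use_count bookkeeping above: 0 means the CPU is down, 1 up,
 * and 2 means a power-up request raced in before an in-progress
 * power-down completed. The first-man/last-man tests OR all counts
 * together, exactly like mcpm_cluster_unused().
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_CPUS_PER_CLUSTER	4

static int use_count[MAX_CPUS_PER_CLUSTER];

static bool cluster_unused(void)
{
	int i, cnt;

	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		cnt |= use_count[i];
	return !cnt;
}

static void power_up(int cpu)
{
	bool cluster_was_down = cluster_unused();

	use_count[cpu]++;
	printf("up   cpu%d: count=%d%s\n", cpu, use_count[cpu],
	       cluster_was_down ? " (first man: power the cluster up)" : "");
}

static void power_down(int cpu)
{
	use_count[cpu]--;
	printf("down cpu%d: count=%d%s%s\n", cpu, use_count[cpu],
	       use_count[cpu] ? " (power-up raced, must stay up)" : "",
	       cluster_unused() ? " (last man: tear the cluster down)" : "");
}

int main(void)
{
	power_up(0);	/* first man */
	power_up(1);
	power_down(1);
	power_up(0);	/* races before cpu0 actually went down: count -> 2 */
	power_down(0);	/* count back to 1, cpu0 re-enters the kernel */
	power_down(0);	/* last man */
	return 0;
}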
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver * * Created by: Nicolas Pitre, March 2012 * Copyright: (C) 2012-2013 Linaro Limited */ #include <linux/atomic.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched/signal.h> #include <uapi/linux/sched/types.h> #include <linux/interrupt.h> #include <linux/cpu_pm.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/clockchips.h> #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/notifier.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/irqchip/arm-gic.h> #include <linux/moduleparam.h> #include <asm/smp_plat.h> #include <asm/cputype.h> #include <asm/suspend.h> #include <asm/mcpm.h> #include <asm/bL_switcher.h> #define CREATE_TRACE_POINTS #include <trace/events/power_cpu_migrate.h> /* * Use our own MPIDR accessors as the generic ones in asm/cputype.h have * __attribute_const__ and we don't want the compiler to assume any * constness here as the value _does_ change along some code paths. */ static int read_mpidr(void) { unsigned int id; asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id)); return id & MPIDR_HWID_BITMASK; } /* * bL switcher core code. */ static void bL_do_switch(void *_arg) { unsigned ib_mpidr, ib_cpu, ib_cluster; long volatile handshake, **handshake_ptr = _arg; pr_debug("%s\n", __func__); ib_mpidr = cpu_logical_map(smp_processor_id()); ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0); ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1); /* Advertise our handshake location */ if (handshake_ptr) { handshake = 0; *handshake_ptr = &handshake; } else handshake = -1; /* * Our state has been saved at this point. Let's release our * inbound CPU. */ mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume); sev(); /* * From this point, we must assume that our counterpart CPU might * have taken over in its parallel world already, as if execution * just returned from cpu_suspend(). It is therefore important to * be very careful not to make any change the other guy is not * expecting. This is why we need stack isolation. * * Fancy under cover tasks could be performed here. For now * we have none. */ /* * Let's wait until our inbound is alive. */ while (!handshake) { wfe(); smp_mb(); } /* Let's put ourself down. */ mcpm_cpu_power_down(); /* should never get here */ BUG(); } /* * Stack isolation. To ensure 'current' remains valid, we just use another * piece of our thread's stack space which should be fairly lightly used. * The selected area starts just above the thread_info structure located * at the very bottom of the stack, aligned to a cache line, and indexed * with the cluster number. 
*/ #define STACK_SIZE 512 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); static int bL_switchpoint(unsigned long _arg) { unsigned int mpidr = read_mpidr(); unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1); void *stack = current_thread_info() + 1; stack = PTR_ALIGN(stack, L1_CACHE_BYTES); stack += clusterid * STACK_SIZE + STACK_SIZE; call_with_stack(bL_do_switch, (void *)_arg, stack); BUG(); } /* * Generic switcher interface */ static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS]; static int bL_switcher_cpu_pairing[NR_CPUS]; /* * bL_switch_to - Switch to a specific cluster for the current CPU * @new_cluster_id: the ID of the cluster to switch to. * * This function must be called on the CPU to be switched. * Returns 0 on success, else a negative status code. */ static int bL_switch_to(unsigned int new_cluster_id) { unsigned int mpidr, this_cpu, that_cpu; unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster; struct completion inbound_alive; long volatile *handshake_ptr; int ipi_nr, ret; this_cpu = smp_processor_id(); ob_mpidr = read_mpidr(); ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0); ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1); BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr); if (new_cluster_id == ob_cluster) return 0; that_cpu = bL_switcher_cpu_pairing[this_cpu]; ib_mpidr = cpu_logical_map(that_cpu); ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0); ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1); pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n", this_cpu, ob_mpidr, ib_mpidr); this_cpu = smp_processor_id(); /* Close the gate for our entry vectors */ mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL); mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL); /* Install our "inbound alive" notifier. */ init_completion(&inbound_alive); ipi_nr = register_ipi_completion(&inbound_alive, this_cpu); ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]); mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr); /* * Let's wake up the inbound CPU now in case it requires some delay * to come online, but leave it gated in our entry vector code. */ ret = mcpm_cpu_power_up(ib_cpu, ib_cluster); if (ret) { pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret); return ret; } /* * Raise a SGI on the inbound CPU to make sure it doesn't stall * in a possible WFI, such as in bL_power_down(). */ gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0); /* * Wait for the inbound to come up. This allows for other * tasks to be scheduled in the mean time. */ wait_for_completion(&inbound_alive); mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0); /* * From this point we are entering the switch critical zone * and can't take any interrupts anymore. */ local_irq_disable(); local_fiq_disable(); trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr); /* redirect GIC's SGIs to our counterpart */ gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]); tick_suspend_local(); ret = cpu_pm_enter(); /* we can not tolerate errors at this point */ if (ret) panic("%s: cpu_pm_enter() returned %d\n", __func__, ret); /* Swap the physical CPUs in the logical map for this logical CPU. */ cpu_logical_map(this_cpu) = ib_mpidr; cpu_logical_map(that_cpu) = ob_mpidr; /* Let's do the actual CPU switch. 
*/ ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint); if (ret > 0) panic("%s: cpu_suspend() returned %d\n", __func__, ret); /* We are executing on the inbound CPU at this point */ mpidr = read_mpidr(); pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr); BUG_ON(mpidr != ib_mpidr); mcpm_cpu_powered_up(); ret = cpu_pm_exit(); tick_resume_local(); trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr); local_fiq_enable(); local_irq_enable(); *handshake_ptr = 1; dsb_sev(); if (ret) pr_err("%s exiting with error %d\n", __func__, ret); return ret; } struct bL_thread { spinlock_t lock; struct task_struct *task; wait_queue_head_t wq; int wanted_cluster; struct completion started; bL_switch_completion_handler completer; void *completer_cookie; }; static struct bL_thread bL_threads[NR_CPUS]; static int bL_switcher_thread(void *arg) { struct bL_thread *t = arg; int cluster; bL_switch_completion_handler completer; void *completer_cookie; sched_set_fifo_low(current); complete(&t->started); do { if (signal_pending(current)) flush_signals(current); wait_event_interruptible(t->wq, t->wanted_cluster != -1 || kthread_should_stop()); spin_lock(&t->lock); cluster = t->wanted_cluster; completer = t->completer; completer_cookie = t->completer_cookie; t->wanted_cluster = -1; t->completer = NULL; spin_unlock(&t->lock); if (cluster != -1) { bL_switch_to(cluster); if (completer) completer(completer_cookie); } } while (!kthread_should_stop()); return 0; } static struct task_struct *bL_switcher_thread_create(int cpu, void *arg) { struct task_struct *task; task = kthread_create_on_node(bL_switcher_thread, arg, cpu_to_node(cpu), "kswitcher_%d", cpu); if (!IS_ERR(task)) { kthread_bind(task, cpu); wake_up_process(task); } else pr_err("%s failed for CPU %d\n", __func__, cpu); return task; } /* * bL_switch_request_cb - Switch to a specific cluster for the given CPU, * with completion notification via a callback * * @cpu: the CPU to switch * @new_cluster_id: the ID of the cluster to switch to. * @completer: switch completion callback. if non-NULL, * @completer(@completer_cookie) will be called on completion of * the switch, in non-atomic context. * @completer_cookie: opaque context argument for @completer. * * This function causes a cluster switch on the given CPU by waking up * the appropriate switcher thread. This function may or may not return * before the switch has occurred. * * If a @completer callback function is supplied, it will be called when * the switch is complete. This can be used to determine asynchronously * when the switch is complete, regardless of when bL_switch_request() * returns. When @completer is supplied, no new switch request is permitted * for the affected CPU until after the switch is complete, and @completer * has returned. */ int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id, bL_switch_completion_handler completer, void *completer_cookie) { struct bL_thread *t; if (cpu >= ARRAY_SIZE(bL_threads)) { pr_err("%s: cpu %d out of bounds\n", __func__, cpu); return -EINVAL; } t = &bL_threads[cpu]; if (IS_ERR(t->task)) return PTR_ERR(t->task); if (!t->task) return -ESRCH; spin_lock(&t->lock); if (t->completer) { spin_unlock(&t->lock); return -EBUSY; } t->completer = completer; t->completer_cookie = completer_cookie; t->wanted_cluster = new_cluster_id; spin_unlock(&t->lock); wake_up(&t->wq); return 0; } EXPORT_SYMBOL_GPL(bL_switch_request_cb); /* * Activation and configuration code. 
*/ static DEFINE_MUTEX(bL_switcher_activation_lock); static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier); static unsigned int bL_switcher_active; static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS]; static cpumask_t bL_switcher_removed_logical_cpus; int bL_switcher_register_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&bL_activation_notifier, nb); } EXPORT_SYMBOL_GPL(bL_switcher_register_notifier); int bL_switcher_unregister_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&bL_activation_notifier, nb); } EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier); static int bL_activation_notify(unsigned long val) { int ret; ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL); if (ret & NOTIFY_STOP_MASK) pr_err("%s: notifier chain failed with status 0x%x\n", __func__, ret); return notifier_to_errno(ret); } static void bL_switcher_restore_cpus(void) { int i; for_each_cpu(i, &bL_switcher_removed_logical_cpus) { struct device *cpu_dev = get_cpu_device(i); int ret = device_online(cpu_dev); if (ret) dev_err(cpu_dev, "switcher: unable to restore CPU\n"); } } static int bL_switcher_halve_cpus(void) { int i, j, cluster_0, gic_id, ret; unsigned int cpu, cluster, mask; cpumask_t available_cpus; /* First pass to validate what we have */ mask = 0; for_each_online_cpu(i) { cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0); cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); if (cluster >= 2) { pr_err("%s: only dual cluster systems are supported\n", __func__); return -EINVAL; } if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER)) return -EINVAL; mask |= (1 << cluster); } if (mask != 3) { pr_err("%s: no CPU pairing possible\n", __func__); return -EINVAL; } /* * Now let's do the pairing. We match each CPU with another CPU * from a different cluster. To get a uniform scheduling behavior * without fiddling with CPU topology and compute capacity data, * we'll use logical CPUs initially belonging to the same cluster. */ memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing)); cpumask_copy(&available_cpus, cpu_online_mask); cluster_0 = -1; for_each_cpu(i, &available_cpus) { int match = -1; cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); if (cluster_0 == -1) cluster_0 = cluster; if (cluster != cluster_0) continue; cpumask_clear_cpu(i, &available_cpus); for_each_cpu(j, &available_cpus) { cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1); /* * Let's remember the last match to create "odd" * pairings on purpose in order for other code not * to assume any relation between physical and * logical CPU numbers. */ if (cluster != cluster_0) match = j; } if (match != -1) { bL_switcher_cpu_pairing[i] = match; cpumask_clear_cpu(match, &available_cpus); pr_info("CPU%d paired with CPU%d\n", i, match); } } /* * Now we disable the unwanted CPUs i.e. everything that has no * pairing information (that includes the pairing counterparts). 
*/ cpumask_clear(&bL_switcher_removed_logical_cpus); for_each_online_cpu(i) { cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0); cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); /* Let's take note of the GIC ID for this CPU */ gic_id = gic_get_cpu_id(i); if (gic_id < 0) { pr_err("%s: bad GIC ID for CPU %d\n", __func__, i); bL_switcher_restore_cpus(); return -EINVAL; } bL_gic_id[cpu][cluster] = gic_id; pr_info("GIC ID for CPU %u cluster %u is %u\n", cpu, cluster, gic_id); if (bL_switcher_cpu_pairing[i] != -1) { bL_switcher_cpu_original_cluster[i] = cluster; continue; } ret = device_offline(get_cpu_device(i)); if (ret) { bL_switcher_restore_cpus(); return ret; } cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus); } return 0; } /* Determine the logical CPU a given physical CPU is grouped on. */ int bL_switcher_get_logical_index(u32 mpidr) { int cpu; if (!bL_switcher_active) return -EUNATCH; mpidr &= MPIDR_HWID_BITMASK; for_each_online_cpu(cpu) { int pairing = bL_switcher_cpu_pairing[cpu]; if (pairing == -1) continue; if ((mpidr == cpu_logical_map(cpu)) || (mpidr == cpu_logical_map(pairing))) return cpu; } return -EINVAL; } static void bL_switcher_trace_trigger_cpu(void *__always_unused info) { trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr()); } int bL_switcher_trace_trigger(void) { preempt_disable(); bL_switcher_trace_trigger_cpu(NULL); smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true); preempt_enable(); return 0; } EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger); static int bL_switcher_enable(void) { int cpu, ret; mutex_lock(&bL_switcher_activation_lock); lock_device_hotplug(); if (bL_switcher_active) { unlock_device_hotplug(); mutex_unlock(&bL_switcher_activation_lock); return 0; } pr_info("big.LITTLE switcher initializing\n"); ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE); if (ret) goto error; ret = bL_switcher_halve_cpus(); if (ret) goto error; bL_switcher_trace_trigger(); for_each_online_cpu(cpu) { struct bL_thread *t = &bL_threads[cpu]; spin_lock_init(&t->lock); init_waitqueue_head(&t->wq); init_completion(&t->started); t->wanted_cluster = -1; t->task = bL_switcher_thread_create(cpu, t); } bL_switcher_active = 1; bL_activation_notify(BL_NOTIFY_POST_ENABLE); pr_info("big.LITTLE switcher initialized\n"); goto out; error: pr_warn("big.LITTLE switcher initialization failed\n"); bL_activation_notify(BL_NOTIFY_POST_DISABLE); out: unlock_device_hotplug(); mutex_unlock(&bL_switcher_activation_lock); return ret; } #ifdef CONFIG_SYSFS static void bL_switcher_disable(void) { unsigned int cpu, cluster; struct bL_thread *t; struct task_struct *task; mutex_lock(&bL_switcher_activation_lock); lock_device_hotplug(); if (!bL_switcher_active) goto out; if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) { bL_activation_notify(BL_NOTIFY_POST_ENABLE); goto out; } bL_switcher_active = 0; /* * To deactivate the switcher, we must shut down the switcher * threads to prevent any other requests from being accepted. * Then, if the final cluster for given logical CPU is not the * same as the original one, we'll recreate a switcher thread * just for the purpose of switching the CPU back without any * possibility for interference from external requests. 
*/ for_each_online_cpu(cpu) { t = &bL_threads[cpu]; task = t->task; t->task = NULL; if (!task || IS_ERR(task)) continue; kthread_stop(task); /* no more switch may happen on this CPU at this point */ cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1); if (cluster == bL_switcher_cpu_original_cluster[cpu]) continue; init_completion(&t->started); t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu]; task = bL_switcher_thread_create(cpu, t); if (!IS_ERR(task)) { wait_for_completion(&t->started); kthread_stop(task); cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1); if (cluster == bL_switcher_cpu_original_cluster[cpu]) continue; } /* If execution gets here, we're in trouble. */ pr_crit("%s: unable to restore original cluster for CPU %d\n", __func__, cpu); pr_crit("%s: CPU %d can't be restored\n", __func__, bL_switcher_cpu_pairing[cpu]); cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu], &bL_switcher_removed_logical_cpus); } bL_switcher_restore_cpus(); bL_switcher_trace_trigger(); bL_activation_notify(BL_NOTIFY_POST_DISABLE); out: unlock_device_hotplug(); mutex_unlock(&bL_switcher_activation_lock); } static ssize_t bL_switcher_active_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", bL_switcher_active); } static ssize_t bL_switcher_active_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret; switch (buf[0]) { case '0': bL_switcher_disable(); ret = 0; break; case '1': ret = bL_switcher_enable(); break; default: ret = -EINVAL; } return (ret >= 0) ? count : ret; } static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = bL_switcher_trace_trigger(); return ret ? ret : count; } static struct kobj_attribute bL_switcher_active_attr = __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store); static struct kobj_attribute bL_switcher_trace_trigger_attr = __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store); static struct attribute *bL_switcher_attrs[] = { &bL_switcher_active_attr.attr, &bL_switcher_trace_trigger_attr.attr, NULL, }; static struct attribute_group bL_switcher_attr_group = { .attrs = bL_switcher_attrs, }; static struct kobject *bL_switcher_kobj; static int __init bL_switcher_sysfs_init(void) { int ret; bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj); if (!bL_switcher_kobj) return -ENOMEM; ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group); if (ret) kobject_put(bL_switcher_kobj); return ret; } #endif /* CONFIG_SYSFS */ bool bL_switcher_get_enabled(void) { mutex_lock(&bL_switcher_activation_lock); return bL_switcher_active; } EXPORT_SYMBOL_GPL(bL_switcher_get_enabled); void bL_switcher_put_enabled(void) { mutex_unlock(&bL_switcher_activation_lock); } EXPORT_SYMBOL_GPL(bL_switcher_put_enabled); /* * Veto any CPU hotplug operation on those CPUs we've removed * while the switcher is active. * We're just not ready to deal with that given the trickery involved. 
*/ static int bL_switcher_cpu_pre(unsigned int cpu) { int pairing; if (!bL_switcher_active) return 0; pairing = bL_switcher_cpu_pairing[cpu]; if (pairing == -1) return -EINVAL; return 0; } static bool no_bL_switcher; core_param(no_bL_switcher, no_bL_switcher, bool, 0644); static int __init bL_switcher_init(void) { int ret; if (!mcpm_is_available()) return -ENODEV; cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare", bL_switcher_cpu_pre, NULL); ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown", NULL, bL_switcher_cpu_pre); if (ret < 0) { cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE); pr_err("bL_switcher: Failed to allocate a hotplug state\n"); return ret; } if (!no_bL_switcher) { ret = bL_switcher_enable(); if (ret) return ret; } #ifdef CONFIG_SYSFS ret = bL_switcher_sysfs_init(); if (ret) pr_err("%s: unable to create sysfs entry\n", __func__); #endif return 0; } late_initcall(bL_switcher_init);
linux-master
arch/arm/common/bL_switcher.c
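/*
 * Standalone model (illustrative only) of the pairing pass in
 * bL_switcher_halve_cpus(): each logical CPU on the first cluster is
 * paired with the *last* still-unpaired CPU of the other cluster,
 * deliberately producing "odd" pairings so that no other code can
 * assume a fixed relation between physical and logical CPU numbers.
 * The topology table below is invented for the demo.
 */
#include <stdio.h>

#define NR_CPUS 8

/* invented topology: logical cpu -> cluster id (two clusters) */
static const int cpu_cluster[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int pairing[NR_CPUS];
static int available[NR_CPUS];

int main(void)
{
	int i, j, cluster_0 = -1;

	for (i = 0; i < NR_CPUS; i++) {
		pairing[i] = -1;
		available[i] = 1;
	}

	for (i = 0; i < NR_CPUS; i++) {
		int match = -1;

		if (!available[i])
			continue;
		if (cluster_0 == -1)
			cluster_0 = cpu_cluster[i];
		if (cpu_cluster[i] != cluster_0)
			continue;
		available[i] = 0;
		for (j = 0; j < NR_CPUS; j++)
			if (available[j] && cpu_cluster[j] != cluster_0)
				match = j;	/* remember the last match */
		if (match != -1) {
			pairing[i] = match;
			available[match] = 0;
			printf("CPU%d paired with CPU%d\n", i, match);
		}
	}
	return 0;
}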