python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0 #include <linux/err.h> #include <linux/module.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include "hardware.h" #include "common.h" unsigned int __mxc_cpu_type; static unsigned int imx_soc_revision; void mxc_set_cpu_type(unsigned int type) { __mxc_cpu_type = type; } void imx_set_soc_revision(unsigned int rev) { imx_soc_revision = rev; } unsigned int imx_get_soc_revision(void) { return imx_soc_revision; } void imx_print_silicon_rev(const char *cpu, int srev) { if (srev == IMX_CHIP_REVISION_UNKNOWN) pr_info("CPU identified as %s, unknown revision\n", cpu); else pr_info("CPU identified as %s, silicon rev %d.%d\n", cpu, (srev >> 4) & 0xf, srev & 0xf); } void __init imx_set_aips(void __iomem *base) { unsigned int reg; /* * Set all MPROTx to be non-bufferable, trusted for R/W, * not forced to user-mode. */ imx_writel(0x77777777, base + 0x0); imx_writel(0x77777777, base + 0x4); /* * Set all OPACRx to be non-bufferable, to not require * supervisor privilege level for access, allow for * write access and untrusted master access. */ imx_writel(0x0, base + 0x40); imx_writel(0x0, base + 0x44); imx_writel(0x0, base + 0x48); imx_writel(0x0, base + 0x4C); reg = imx_readl(base + 0x50) & 0x00FFFFFF; imx_writel(reg, base + 0x50); } void __init imx_aips_allow_unprivileged_access( const char *compat) { void __iomem *aips_base_addr; struct device_node *np; for_each_compatible_node(np, NULL, compat) { aips_base_addr = of_iomap(np, 0); WARN_ON(!aips_base_addr); imx_set_aips(aips_base_addr); } }
linux-master
arch/arm/mach-imx/cpu.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 */

#include <linux/suspend.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/cacheflush.h>
#include <asm/fncpy.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>

#include "common.h"
#include "cpuidle.h"
#include "hardware.h"

/* CCM low-power control register and its fields. */
#define MXC_CCM_CLPCR 0x54
#define MXC_CCM_CLPCR_LPM_OFFSET 0
#define MXC_CCM_CLPCR_LPM_MASK 0x3
#define MXC_CCM_CLPCR_STBY_COUNT_OFFSET 9
#define MXC_CCM_CLPCR_VSTBY (0x1 << 8)
#define MXC_CCM_CLPCR_SBYOS (0x1 << 6)

/* Cortex-A8 platform low-power control register. */
#define MXC_CORTEXA8_PLAT_LPC 0xc
#define MXC_CORTEXA8_PLAT_LPC_DSM (1 << 0)
#define MXC_CORTEXA8_PLAT_LPC_DBG_DSM (1 << 1)

/* GPC power-gating control registers for NEON, ARM core and peripherals. */
#define MXC_SRPG_NEON_SRPGCR 0x280
#define MXC_SRPG_ARM_SRPGCR 0x2a0
#define MXC_SRPG_EMPGC0_SRPGCR 0x2c0
#define MXC_SRPG_EMPGC1_SRPGCR 0x2d0
#define MXC_SRPGCR_PCR 1

/*
 * The WAIT_UNCLOCKED_POWER_OFF state only requires <= 500ns to exit.
 * This is also the lowest power state possible without affecting
 * non-cpu parts of the system.  For these reasons, imx5 should default
 * to always using this state for cpu idling.  The PM_SUSPEND_STANDBY also
 * uses this state and needs to take no action when registers remain configured
 * for this state.
 */
#define IMX5_DEFAULT_CPU_IDLE_STATE WAIT_UNCLOCKED_POWER_OFF

/* One IOMUXC pad register patch applied by the ocram suspend code. */
struct imx5_suspend_io_state {
	u32 offset;	/* register offset from IOMUXC base */
	u32 clear;	/* bits to clear before suspend */
	u32 set;	/* bits to set before suspend */
	u32 saved_value;	/* filled in/restored by the asm code */
};

/* Per-SoC physical addresses and suspend hooks. */
struct imx5_pm_data {
	phys_addr_t ccm_addr;
	phys_addr_t cortex_addr;
	phys_addr_t gpc_addr;
	phys_addr_t m4if_addr;
	phys_addr_t iomuxc_addr;
	void (*suspend_asm)(void __iomem *ocram_vbase);
	const u32 *suspend_asm_sz;
	const struct imx5_suspend_io_state *suspend_io_config;
	int suspend_io_count;
};

/* DDR pads to tri-state (high-Z) while the i.MX53 is in suspend. */
static const struct imx5_suspend_io_state imx53_suspend_io_config[] = {
#define MX53_DSE_HIGHZ_MASK (0x7 << 19)
	{.offset = 0x584, .clear = MX53_DSE_HIGHZ_MASK}, /* DQM0 */
	{.offset = 0x594, .clear = MX53_DSE_HIGHZ_MASK}, /* DQM1 */
	{.offset = 0x560, .clear = MX53_DSE_HIGHZ_MASK}, /* DQM2 */
	{.offset = 0x554, .clear = MX53_DSE_HIGHZ_MASK}, /* DQM3 */
	{.offset = 0x574, .clear = MX53_DSE_HIGHZ_MASK}, /* CAS */
	{.offset = 0x588, .clear = MX53_DSE_HIGHZ_MASK}, /* RAS */
	{.offset = 0x578, .clear = MX53_DSE_HIGHZ_MASK}, /* SDCLK_0 */
	{.offset = 0x570, .clear = MX53_DSE_HIGHZ_MASK}, /* SDCLK_1 */
	{.offset = 0x580, .clear = MX53_DSE_HIGHZ_MASK}, /* SDODT0 */
	{.offset = 0x564, .clear = MX53_DSE_HIGHZ_MASK}, /* SDODT1 */
	{.offset = 0x57c, .clear = MX53_DSE_HIGHZ_MASK}, /* SDQS0 */
	{.offset = 0x590, .clear = MX53_DSE_HIGHZ_MASK}, /* SDQS1 */
	{.offset = 0x568, .clear = MX53_DSE_HIGHZ_MASK}, /* SDQS2 */
	{.offset = 0x558, .clear = MX53_DSE_HIGHZ_MASK}, /* SDSQ3 */
	{.offset = 0x6f0, .clear = MX53_DSE_HIGHZ_MASK}, /* GRP_ADDS */
	{.offset = 0x718, .clear = MX53_DSE_HIGHZ_MASK}, /* GRP_BODS */
	{.offset = 0x71c, .clear = MX53_DSE_HIGHZ_MASK}, /* GRP_B1DS */
	{.offset = 0x728, .clear = MX53_DSE_HIGHZ_MASK}, /* GRP_B2DS */
	{.offset = 0x72c, .clear = MX53_DSE_HIGHZ_MASK}, /* GRP_B3DS */
	/* Controls the CKE signal which is required to leave self refresh */
	{.offset = 0x720, .clear = MX53_DSE_HIGHZ_MASK, .set = 1 << 19}, /* CTLDS */
};

/* i.MX51 has no ocram suspend path: only register bases are needed. */
static const struct imx5_pm_data imx51_pm_data __initconst = {
	.ccm_addr = 0x73fd4000,
	.cortex_addr = 0x83fa0000,
	.gpc_addr = 0x73fd8000,
};

static const struct imx5_pm_data imx53_pm_data __initconst = {
	.ccm_addr = 0x53fd4000,
	.cortex_addr = 0x63fa0000,
	.gpc_addr = 0x53fd8000,
	.m4if_addr = 0x63fd8000,
	.iomuxc_addr = 0x53fa8000,
	.suspend_asm = &imx53_suspend,
	.suspend_asm_sz = &imx53_suspend_sz,
	.suspend_io_config = imx53_suspend_io_config,
	.suspend_io_count = ARRAY_SIZE(imx53_suspend_io_config),
};

#define MX5_MAX_SUSPEND_IOSTATE ARRAY_SIZE(imx53_suspend_io_config)

/*
 * This structure is for passing necessary data for low level ocram
 * suspend code(arch/arm/mach-imx/suspend-imx53.S), if this struct
 * definition is changed, the offset definition in that file
 * must be also changed accordingly otherwise, the suspend to ocram
 * function will be broken!
 */
struct imx5_cpu_suspend_info {
	void __iomem *m4if_base;
	void __iomem *iomuxc_base;
	u32 io_count;
	struct imx5_suspend_io_state io_state[MX5_MAX_SUSPEND_IOSTATE];
} __aligned(8);

/* Virtual bases mapped in imx5_pm_common_init(). */
static void __iomem *ccm_base;
static void __iomem *cortex_base;
static void __iomem *gpc_base;
static void __iomem *suspend_ocram_base;
/* Relocated copy of the suspend asm in ocram; NULL when unsupported. */
static void (*imx5_suspend_in_ocram_fn)(void __iomem *ocram_vbase);

/*
 * set cpu low power mode before WFI instruction. This function is called
 * mx5 because it can be used for mx51, and mx53.
 */
static void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
{
	u32 plat_lpc, arm_srpgcr, ccm_clpcr;
	u32 empgc0, empgc1;
	int stop_mode = 0;

	/* always allow platform to issue a deep sleep mode request */
	plat_lpc = imx_readl(cortex_base + MXC_CORTEXA8_PLAT_LPC) &
	    ~(MXC_CORTEXA8_PLAT_LPC_DSM);
	ccm_clpcr = imx_readl(ccm_base + MXC_CCM_CLPCR) &
	    ~(MXC_CCM_CLPCR_LPM_MASK);
	arm_srpgcr = imx_readl(gpc_base + MXC_SRPG_ARM_SRPGCR) &
	    ~(MXC_SRPGCR_PCR);
	empgc0 = imx_readl(gpc_base + MXC_SRPG_EMPGC0_SRPGCR) &
	    ~(MXC_SRPGCR_PCR);
	empgc1 = imx_readl(gpc_base + MXC_SRPG_EMPGC1_SRPGCR) &
	    ~(MXC_SRPGCR_PCR);

	switch (mode) {
	case WAIT_CLOCKED:
		/* default WFI behaviour, nothing to program */
		break;
	case WAIT_UNCLOCKED:
		ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
		break;
	case WAIT_UNCLOCKED_POWER_OFF:
	case STOP_POWER_OFF:
		plat_lpc |= MXC_CORTEXA8_PLAT_LPC_DSM
			| MXC_CORTEXA8_PLAT_LPC_DBG_DSM;
		if (mode == WAIT_UNCLOCKED_POWER_OFF) {
			ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
			ccm_clpcr &= ~MXC_CCM_CLPCR_VSTBY;
			ccm_clpcr &= ~MXC_CCM_CLPCR_SBYOS;
			stop_mode = 0;
		} else {
			/* full STOP: standby voltage + oscillator off */
			ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
			ccm_clpcr |= 0x3 << MXC_CCM_CLPCR_STBY_COUNT_OFFSET;
			ccm_clpcr |= MXC_CCM_CLPCR_VSTBY;
			ccm_clpcr |= MXC_CCM_CLPCR_SBYOS;
			stop_mode = 1;
		}
		/* power-gate the ARM core while sleeping */
		arm_srpgcr |= MXC_SRPGCR_PCR;
		break;
	case STOP_POWER_ON:
		ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
		break;
	default:
		printk(KERN_WARNING "UNKNOWN cpu power mode: %d\n", mode);
		return;
	}

	imx_writel(plat_lpc, cortex_base + MXC_CORTEXA8_PLAT_LPC);
	imx_writel(ccm_clpcr, ccm_base + MXC_CCM_CLPCR);
	imx_writel(arm_srpgcr, gpc_base + MXC_SRPG_ARM_SRPGCR);
	imx_writel(arm_srpgcr, gpc_base + MXC_SRPG_NEON_SRPGCR);

	if (stop_mode) {
		/* also power-gate the peripheral domains in deep stop */
		empgc0 |= MXC_SRPGCR_PCR;
		empgc1 |= MXC_SRPGCR_PCR;

		imx_writel(empgc0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
		imx_writel(empgc1, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
	}
}

/*
 * platform_suspend_ops.enter callback: program the low-power state,
 * execute the suspend (ocram asm when available, plain WFI otherwise),
 * then restore the default idle configuration.
 */
static int mx5_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
		mx5_cpu_lp_set(STOP_POWER_OFF);
		break;
	case PM_SUSPEND_STANDBY:
		/* DEFAULT_IDLE_STATE already configured */
		break;
	default:
		return -EINVAL;
	}

	if (state == PM_SUSPEND_MEM) {
		local_flush_tlb_all();
		flush_cache_all();

		/*clear the EMPGC0/1 bits */
		imx_writel(0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
		imx_writel(0, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);

		if (imx5_suspend_in_ocram_fn)
			imx5_suspend_in_ocram_fn(suspend_ocram_base);
		else
			cpu_do_idle();
	} else {
		cpu_do_idle();
	}

	/* return registers to default idle state */
	mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE);
	return 0;
}

/* Accept any state between PM_SUSPEND_ON (exclusive) and PM_SUSPEND_MAX. */
static int mx5_pm_valid(suspend_state_t state)
{
	return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
}

static const struct platform_suspend_ops mx5_suspend_ops = {
	.valid = mx5_pm_valid,
	.enter = mx5_suspend_enter,
};

/* Arm the TZIC wakeup sources, then WFI; skips idle if arming failed. */
static inline int imx5_cpu_do_idle(void)
{
	int ret = tzic_enable_wake();

	if (likely(!ret))
		cpu_do_idle();

	return ret;
}

static void imx5_pm_idle(void)
{
	imx5_cpu_do_idle();
}

/*
 * Allocate @size bytes of on-chip RAM from the "mmio-sram" pool and
 * return an executable virtual mapping (and optionally the physical
 * address).  Returns 0 on success, negative errno otherwise.
 */
static int __init imx_suspend_alloc_ocram(
				size_t size,
				void __iomem **virt_out,
				phys_addr_t *phys_out)
{
	struct device_node *node;
	struct platform_device *pdev;
	struct gen_pool *ocram_pool;
	unsigned long ocram_base;
	void __iomem *virt;
	phys_addr_t phys;
	int ret = 0;

	/* Copied from imx6: TODO factorize */
	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!node) {
		pr_warn("%s: failed to find ocram node!\n", __func__);
		return -ENODEV;
	}

	pdev = of_find_device_by_node(node);
	if (!pdev) {
		pr_warn("%s: failed to find ocram device!\n", __func__);
		ret = -ENODEV;
		goto put_node;
	}

	ocram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!ocram_pool) {
		pr_warn("%s: ocram pool unavailable!\n", __func__);
		ret = -ENODEV;
		goto put_device;
	}

	ocram_base = gen_pool_alloc(ocram_pool, size);
	if (!ocram_base) {
		pr_warn("%s: unable to alloc ocram!\n", __func__);
		ret = -ENOMEM;
		goto put_device;
	}

	phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
	virt = __arm_ioremap_exec(phys, size, false);
	if (phys_out)
		*phys_out = phys;
	if (virt_out)
		*virt_out = virt;

put_device:
	put_device(&pdev->dev);
put_node:
	of_node_put(node);

	return ret;
}

/*
 * Set up suspend-to-ocram: copy the SoC's low-level suspend asm plus its
 * parameter block (struct imx5_cpu_suspend_info) into ocram.  Returns 0
 * when the SoC has no ocram suspend path (not an error).
 */
static int __init imx5_suspend_init(const struct imx5_pm_data *soc_data)
{
	struct imx5_cpu_suspend_info *suspend_info;
	int ret;
	/* Need this to avoid compile error due to const typeof in fncpy.h */
	void (*suspend_asm)(void __iomem *) = soc_data->suspend_asm;

	if (!suspend_asm)
		return 0;

	if (!soc_data->suspend_asm_sz || !*soc_data->suspend_asm_sz)
		return -EINVAL;

	ret = imx_suspend_alloc_ocram(
		*soc_data->suspend_asm_sz + sizeof(*suspend_info),
		&suspend_ocram_base, NULL);
	if (ret)
		return ret;

	/* the parameter block sits at the start of the ocram allocation */
	suspend_info = suspend_ocram_base;

	suspend_info->io_count = soc_data->suspend_io_count;
	memcpy(suspend_info->io_state, soc_data->suspend_io_config,
	       sizeof(*suspend_info->io_state) * soc_data->suspend_io_count);

	suspend_info->m4if_base = ioremap(soc_data->m4if_addr, SZ_16K);
	if (!suspend_info->m4if_base) {
		ret = -ENOMEM;
		goto failed_map_m4if;
	}

	suspend_info->iomuxc_base = ioremap(soc_data->iomuxc_addr, SZ_16K);
	if (!suspend_info->iomuxc_base) {
		ret = -ENOMEM;
		goto failed_map_iomuxc;
	}

	/* relocate the asm routine right after the parameter block */
	imx5_suspend_in_ocram_fn = fncpy(
		suspend_ocram_base + sizeof(*suspend_info),
		suspend_asm,
		*soc_data->suspend_asm_sz);

	return 0;

failed_map_iomuxc:
	iounmap(suspend_info->m4if_base);

failed_map_m4if:
	return ret;
}

/*
 * Common i.MX51/i.MX53 PM bring-up: enable the GPC DVFS clock, map the
 * control blocks, program the default idle state and register cpuidle,
 * suspend-to-ocram and the platform suspend ops.
 */
static int __init imx5_pm_common_init(const struct imx5_pm_data *data)
{
	int ret;
	struct clk *gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");

	if (IS_ERR(gpc_dvfs_clk))
		return PTR_ERR(gpc_dvfs_clk);

	ret = clk_prepare_enable(gpc_dvfs_clk);
	if (ret)
		return ret;

	arm_pm_idle = imx5_pm_idle;

	ccm_base = ioremap(data->ccm_addr, SZ_16K);
	cortex_base = ioremap(data->cortex_addr, SZ_16K);
	gpc_base = ioremap(data->gpc_addr, SZ_16K);
	WARN_ON(!ccm_base || !cortex_base || !gpc_base);

	/* Set the registers to the default cpu idle state. */
	mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE);

	ret = imx5_cpuidle_init();
	if (ret)
		pr_warn("%s: cpuidle init failed %d\n", __func__, ret);

	ret = imx5_suspend_init(data);
	if (ret)
		pr_warn("%s: No DDR LPM support with suspend %d!\n",
			__func__, ret);

	suspend_set_ops(&mx5_suspend_ops);

	return 0;
}

void __init imx51_pm_init(void)
{
	if (IS_ENABLED(CONFIG_SOC_IMX51))
		imx5_pm_common_init(&imx51_pm_data);
}

void __init imx53_pm_init(void)
{
	if (IS_ENABLED(CONFIG_SOC_IMX53))
		imx5_pm_common_init(&imx53_pm_data);
}
linux-master
arch/arm/mach-imx/pm-imx5.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2014 Freescale Semiconductor, Inc. */ #include <linux/irqchip.h> #include <linux/of_platform.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <asm/mach/arch.h> #include "common.h" #include "cpuidle.h" static void __init imx6sx_enet_clk_sel(void) { struct regmap *gpr; gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sx-iomuxc-gpr"); if (!IS_ERR(gpr)) { regmap_update_bits(gpr, IOMUXC_GPR1, IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK, 0); regmap_update_bits(gpr, IOMUXC_GPR1, IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK, 0); } else { pr_err("failed to find fsl,imx6sx-iomux-gpr regmap\n"); } } static inline void imx6sx_enet_init(void) { imx6sx_enet_clk_sel(); } static void __init imx6sx_init_machine(void) { of_platform_default_populate(NULL, NULL, NULL); imx6sx_enet_init(); imx_anatop_init(); imx6sx_pm_init(); } static void __init imx6sx_init_irq(void) { imx_gpc_check_dt(); imx_init_revision_from_anatop(); imx_init_l2cache(); imx_src_init(); irqchip_init(); imx6_pm_ccm_init("fsl,imx6sx-ccm"); } static void __init imx6sx_init_late(void) { imx6sx_cpuidle_init(); if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0); } static const char * const imx6sx_dt_compat[] __initconst = { "fsl,imx6sx", NULL, }; DT_MACHINE_START(IMX6SX, "Freescale i.MX6 SoloX (Device Tree)") .l2c_aux_val = 0, .l2c_aux_mask = ~0, .init_irq = imx6sx_init_irq, .init_machine = imx6sx_init_machine, .dt_compat = imx6sx_dt_compat, .init_late = imx6sx_init_late, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx6sx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012 Sascha Hauer, Pengutronix */ #include <asm/mach/arch.h> #include "common.h" static const char * const imx31_dt_board_compat[] __initconst = { "fsl,imx31", NULL }; DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)") .map_io = mx31_map_io, .init_early = imx31_init_early, .dt_compat = imx31_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx31.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "common.h"
#include "hardware.h"

/* GPC register offsets and fields. */
#define GPC_CNTR 0x0
#define GPC_IMR1 0x008
#define GPC_PGC_CPU_PDN 0x2a0
#define GPC_PGC_CPU_PUPSCR 0x2a4
#define GPC_PGC_CPU_PDNSCR 0x2a8
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0
#define GPC_CNTR_L2_PGE_SHIFT 22

/* Four 32-bit interrupt mask registers -> 128 GPC interrupts. */
#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)

static void __iomem *gpc_base;
/* Per-IMR bitmaps of interrupts armed as wakeup sources. */
static u32 gpc_wake_irqs[IMR_NUM];
/* IMR contents saved across suspend / mask-all. */
static u32 gpc_saved_imrs[IMR_NUM];

void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw)
{
	writel_relaxed((sw2iso << GPC_PGC_SW2ISO_SHIFT) |
		(sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PUPSCR);
}

void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw)
{
	writel_relaxed((sw2iso << GPC_PGC_SW2ISO_SHIFT) |
		(sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PDNSCR);
}

void imx_gpc_set_arm_power_in_lpm(bool power_off)
{
	writel_relaxed(power_off, gpc_base + GPC_PGC_CPU_PDN);
}

void imx_gpc_set_l2_mem_power_in_lpm(bool power_off)
{
	u32 val;

	val = readl_relaxed(gpc_base + GPC_CNTR);
	val &= ~(1 << GPC_CNTR_L2_PGE_SHIFT);
	if (power_off)
		val |= 1 << GPC_CNTR_L2_PGE_SHIFT;
	writel_relaxed(val, gpc_base + GPC_CNTR);
}

/* Save the IMRs and unmask only the armed wakeup sources for suspend. */
void imx_gpc_pre_suspend(bool arm_power_off)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
	int i;

	/* Tell GPC to power off ARM core when suspend */
	if (arm_power_off)
		imx_gpc_set_arm_power_in_lpm(arm_power_off);

	for (i = 0; i < IMR_NUM; i++) {
		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
		writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4);
	}
}

/* Undo imx_gpc_pre_suspend(): restore IMRs and keep the core powered. */
void imx_gpc_post_resume(void)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
	int i;

	/* Keep ARM core powered on for other low-power modes */
	imx_gpc_set_arm_power_in_lpm(false);

	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
}

/* irq_chip .irq_set_wake: record the hwirq in the wakeup bitmap. */
static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned int idx = d->hwirq / 32;
	u32 mask;

	mask = 1 << d->hwirq % 32;
	gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
				  gpc_wake_irqs[idx] & ~mask;

	/*
	 * Do *not* call into the parent, as the GIC doesn't have any
	 * wake-up facility...
	 */
	return 0;
}

void imx_gpc_mask_all(void)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
	int i;

	for (i = 0; i < IMR_NUM; i++) {
		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
		writel_relaxed(~0, reg_imr1 + i * 4);
	}
}

void imx_gpc_restore_all(void)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
	int i;

	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
}

/* Clear the IMR bit for @hwirq (unmask at GPC level). */
void imx_gpc_hwirq_unmask(unsigned int hwirq)
{
	void __iomem *reg;
	u32 val;

	reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
	val = readl_relaxed(reg);
	val &= ~(1 << hwirq % 32);
	writel_relaxed(val, reg);
}

/* Set the IMR bit for @hwirq (mask at GPC level). */
void imx_gpc_hwirq_mask(unsigned int hwirq)
{
	void __iomem *reg;
	u32 val;

	reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
	val = readl_relaxed(reg);
	val |= 1 << (hwirq % 32);
	writel_relaxed(val, reg);
}

static void imx_gpc_irq_unmask(struct irq_data *d)
{
	imx_gpc_hwirq_unmask(d->hwirq);
	irq_chip_unmask_parent(d);
}

static void imx_gpc_irq_mask(struct irq_data *d)
{
	imx_gpc_hwirq_mask(d->hwirq);
	irq_chip_mask_parent(d);
}

/* GPC irq_chip; most operations delegate to the parent (GIC) domain. */
static struct irq_chip imx_gpc_chip = {
	.name = "GPC",
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = imx_gpc_irq_mask,
	.irq_unmask = imx_gpc_irq_unmask,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_set_wake = imx_gpc_irq_set_wake,
	.irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
	.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
};

/* Translate a 3-cell GIC-style DT irq specifier (SPI only). */
static int imx_gpc_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

/* Allocate irqs in this domain and mirror the request into the parent. */
static int imx_gpc_domain_alloc(struct irq_domain *domain,
				unsigned int irq, unsigned int nr_irqs,
				void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= GPC_MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
					      &imx_gpc_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops imx_gpc_domain_ops = {
	.translate = imx_gpc_domain_translate,
	.alloc = imx_gpc_domain_alloc,
	.free = irq_domain_free_irqs_common,
};

/* IRQCHIP_DECLARE init: map the GPC and stack its domain on the GIC. */
static int __init imx_gpc_init(struct device_node *node,
			       struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	gpc_base = of_iomap(node, 0);
	if (WARN_ON(!gpc_base))
		return -ENOMEM;

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
					  node, &imx_gpc_domain_ops, NULL);
	if (!domain) {
		iounmap(gpc_base);
		return -ENOMEM;
	}

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the GPC power domain driver will not be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);

	return 0;
}
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);

/* Warn (and partially work around) old DTs lacking the GPC irq chaining. */
void __init imx_gpc_check_dt(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
	if (WARN_ON(!np))
		return;

	if (WARN_ON(!of_property_read_bool(np, "interrupt-controller"))) {
		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");

		/* map GPC, so that at least CPUidle and WARs keep working */
		gpc_base = of_iomap(np, 0);
	}
	of_node_put(np);
}
linux-master
arch/arm/mach-imx/gpc.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2013 Greg Ungerer <[email protected]> * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2011 Linaro Ltd. */ #include <asm/mach/arch.h> #include "common.h" #include "hardware.h" static void __init imx50_init_early(void) { mxc_set_cpu_type(MXC_CPU_MX50); } static const char * const imx50_dt_board_compat[] __initconst = { "fsl,imx50", NULL }; DT_MACHINE_START(IMX50_DT, "Freescale i.MX50 (Device Tree Support)") .init_early = imx50_init_early, .dt_compat = imx50_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx50.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Freescale Semiconductor, Inc. */ #include <linux/irqchip.h> #include <linux/of_platform.h> #include <asm/mach/arch.h> #include "common.h" #include "cpuidle.h" #include "hardware.h" static void __init imx6ul_init_machine(void) { imx_print_silicon_rev(cpu_is_imx6ull() ? "i.MX6ULL" : "i.MX6UL", imx_get_soc_revision()); of_platform_default_populate(NULL, NULL, NULL); imx_anatop_init(); imx6ul_pm_init(); } static void __init imx6ul_init_irq(void) { imx_init_revision_from_anatop(); imx_src_init(); irqchip_init(); imx6_pm_ccm_init("fsl,imx6ul-ccm"); } static void __init imx6ul_init_late(void) { imx6sx_cpuidle_init(); if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0); } static const char * const imx6ul_dt_compat[] __initconst = { "fsl,imx6ul", "fsl,imx6ull", "fsl,imx6ulz", NULL, }; DT_MACHINE_START(IMX6UL, "Freescale i.MX6 Ultralite (Device Tree)") .init_irq = imx6ul_init_irq, .init_machine = imx6ul_init_machine, .init_late = imx6ul_init_late, .dt_compat = imx6ul_dt_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx6ul.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd
 * Copyright 2006-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Juergen Beisert, [email protected]
 * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, [email protected]
 */
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/system_misc.h>
#include <asm/proc-fns.h>
#include <asm/mach-types.h>
#include <asm/hardware/cache-l2x0.h>

#include "common.h"
#include "hardware.h"

static void __iomem *wdog_base;
static struct clk *wdog_clk;
/* WCR value used to trigger the reset; i.MX1 overrides it (bit 0 vs bit 2). */
static int wcr_enable = (1 << 2);

/*
 * Reset the system. It is called by machine_restart().
 */
void mxc_restart(enum reboot_mode mode, const char *cmd)
{
	if (!wdog_base)
		goto reset_fallback;

	if (!IS_ERR(wdog_clk))
		clk_enable(wdog_clk);

	/* Assert SRS signal */
	imx_writew(wcr_enable, wdog_base);
	/*
	 * Due to imx6q errata ERR004346 (WDOG: WDOG SRS bit requires to be
	 * written twice), we add another two writes to ensure there must be at
	 * least two writes happen in the same one 32kHz clock period.  We save
	 * the target check here, since the writes shouldn't be a huge burden
	 * for other platforms.
	 */
	imx_writew(wcr_enable, wdog_base);
	imx_writew(wcr_enable, wdog_base);

	/* wait for reset to assert... */
	mdelay(500);

	pr_err("%s: Watchdog reset failed to assert reset\n", __func__);

	/* delay to allow the serial port to show the message */
	mdelay(50);

reset_fallback:
	/* we'll take a jump through zero as a poor second */
	soft_restart(0);
}

/* Remember the watchdog base and prepare its clock for mxc_restart(). */
void __init mxc_arch_reset_init(void __iomem *base)
{
	wdog_base = base;

	wdog_clk = clk_get_sys("imx2-wdt.0", NULL);
	if (IS_ERR(wdog_clk))
		pr_warn("%s: failed to get wdog clock\n", __func__);
	else
		clk_prepare(wdog_clk);
}

#ifdef CONFIG_SOC_IMX1
/* i.MX1 variant: its WDOG asserts reset via bit 0 instead of bit 2. */
void __init imx1_reset_init(void __iomem *base)
{
	wcr_enable = (1 << 0);
	mxc_arch_reset_init(base);
}
#endif

#ifdef CONFIG_CACHE_L2X0
/*
 * Configure PL310 L2 cache prefetch settings before the cache is
 * enabled; only touches the registers while the L2 is still disabled.
 */
void __init imx_init_l2cache(void)
{
	void __iomem *l2x0_base;
	struct device_node *np;
	unsigned int val;

	np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!np)
		return;

	l2x0_base = of_iomap(np, 0);
	if (!l2x0_base)
		goto put_node;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Configure the L2 PREFETCH and POWER registers */
		val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
		val |= L310_PREFETCH_CTRL_DBL_LINEFILL |
			L310_PREFETCH_CTRL_INSTR_PREFETCH |
			L310_PREFETCH_CTRL_DATA_PREFETCH;

		/* Set prefetch offset to improve performance */
		val &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		val |= 15;

		writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
	}

	iounmap(l2x0_base);
put_node:
	of_node_put(np);
}
#endif
linux-master
arch/arm/mach-imx/system.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2014 Alexander Shiyan <[email protected]> */ #include <asm/mach/arch.h> #include "common.h" #include "hardware.h" static void __init imx1_init_early(void) { mxc_set_cpu_type(MXC_CPU_MX1); } static const char * const imx1_dt_board_compat[] __initconst = { "fsl,imx1", NULL }; DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)") .init_early = imx1_init_early, .dt_compat = imx1_dt_board_compat, .restart = mxc_restart, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx1.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011-2014 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/clk/imx.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/genalloc.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/suspend.h>
#include <asm/cacheflush.h>
#include <asm/fncpy.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include <asm/tlb.h>

#include "common.h"
#include "hardware.h"

/* CCM Control Register and its well-bias/RBC fields. */
#define CCR				0x0
#define BM_CCR_WB_COUNT			(0x7 << 16)
#define BM_CCR_RBC_BYPASS_COUNT		(0x3f << 21)
#define BM_CCR_RBC_EN			(0x1 << 27)

/* CCM Low Power Control Register and its fields. */
#define CLPCR				0x54
#define BP_CLPCR_LPM			0
#define BM_CLPCR_LPM			(0x3 << 0)
#define BM_CLPCR_BYPASS_PMIC_READY	(0x1 << 2)
#define BM_CLPCR_ARM_CLK_DIS_ON_LPM	(0x1 << 5)
#define BM_CLPCR_SBYOS			(0x1 << 6)
#define BM_CLPCR_DIS_REF_OSC		(0x1 << 7)
#define BM_CLPCR_VSTBY			(0x1 << 8)
#define BP_CLPCR_STBY_COUNT		9
#define BM_CLPCR_STBY_COUNT		(0x3 << 9)
#define BM_CLPCR_COSC_PWRDOWN		(0x1 << 11)
#define BM_CLPCR_WB_PER_AT_LPM		(0x1 << 16)
#define BM_CLPCR_WB_CORE_AT_LPM		(0x1 << 17)
#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS	(0x1 << 19)
#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS	(0x1 << 21)
#define BM_CLPCR_MASK_CORE0_WFI		(0x1 << 22)
#define BM_CLPCR_MASK_CORE1_WFI		(0x1 << 23)
#define BM_CLPCR_MASK_CORE2_WFI		(0x1 << 24)
#define BM_CLPCR_MASK_CORE3_WFI		(0x1 << 25)
#define BM_CLPCR_MASK_SCU_IDLE		(0x1 << 26)
#define BM_CLPCR_MASK_L2CC_IDLE		(0x1 << 27)

/* CCM General Purpose Register. */
#define CGPR				0x64
#define BM_CGPR_INT_MEM_CLK_LPM		(0x1 << 17)

#define MX6Q_SUSPEND_OCRAM_SIZE		0x1000
#define MX6_MAX_MMDC_IO_NUM		33

static void __iomem *ccm_base;
/* Executable ocram mapping holding pm_info + relocated suspend asm code. */
static void __iomem *suspend_ocram_base;
static void (*imx6_suspend_in_ocram_fn)(void __iomem *ocram_vbase);

/*
 * suspend ocram space layout:
 * ======================== high address ======================
 *                              .
 *                              .
 *                              .
 *                              ^
 *                              ^
 *                              ^
 *                      imx6_suspend code
 *              PM_INFO structure(imx6_cpu_pm_info)
 * ======================== low address =======================
 */

/* Physical + virtual address pair for a peripheral the asm code touches. */
struct imx6_pm_base {
	phys_addr_t pbase;
	void __iomem *vbase;
};

/* Per-SoC compatibles and MMDC IOMUX pad list needed for suspend-to-ocram. */
struct imx6_pm_socdata {
	u32 ddr_type;
	const char *mmdc_compat;
	const char *src_compat;
	const char *iomuxc_compat;
	const char *gpc_compat;
	const char *pl310_compat;
	const u32 mmdc_io_num;
	const u32 *mmdc_io_offset;
};

static const u32 imx6q_mmdc_io_offset[] __initconst = {
	0x5ac, 0x5b4, 0x528, 0x520, /* DQM0 ~ DQM3 */
	0x514, 0x510, 0x5bc, 0x5c4, /* DQM4 ~ DQM7 */
	0x56c, 0x578, 0x588, 0x594, /* CAS, RAS, SDCLK_0, SDCLK_1 */
	0x5a8, 0x5b0, 0x524, 0x51c, /* SDQS0 ~ SDQS3 */
	0x518, 0x50c, 0x5b8, 0x5c0, /* SDQS4 ~ SDQS7 */
	0x784, 0x788, 0x794, 0x79c, /* GPR_B0DS ~ GPR_B3DS */
	0x7a0, 0x7a4, 0x7a8, 0x748, /* GPR_B4DS ~ GPR_B7DS */
	0x59c, 0x5a0, 0x750, 0x774, /* SODT0, SODT1, MODE_CTL, MODE */
	0x74c,			    /* GPR_ADDS */
};

static const u32 imx6dl_mmdc_io_offset[] __initconst = {
	0x470, 0x474, 0x478, 0x47c, /* DQM0 ~ DQM3 */
	0x480, 0x484, 0x488, 0x48c, /* DQM4 ~ DQM7 */
	0x464, 0x490, 0x4ac, 0x4b0, /* CAS, RAS, SDCLK_0, SDCLK_1 */
	0x4bc, 0x4c0, 0x4c4, 0x4c8, /* DRAM_SDQS0 ~ DRAM_SDQS3 */
	0x4cc, 0x4d0, 0x4d4, 0x4d8, /* DRAM_SDQS4 ~ DRAM_SDQS7 */
	0x764, 0x770, 0x778, 0x77c, /* GPR_B0DS ~ GPR_B3DS */
	0x780, 0x784, 0x78c, 0x748, /* GPR_B4DS ~ GPR_B7DS */
	0x4b4, 0x4b8, 0x750, 0x760, /* SODT0, SODT1, MODE_CTL, MODE */
	0x74c,			    /* GPR_ADDS */
};

static const u32 imx6sl_mmdc_io_offset[] __initconst = {
	0x30c, 0x310, 0x314, 0x318, /* DQM0 ~ DQM3 */
	0x5c4, 0x5cc, 0x5d4, 0x5d8, /* GPR_B0DS ~ GPR_B3DS */
	0x300, 0x31c, 0x338, 0x5ac, /* CAS, RAS, SDCLK_0, GPR_ADDS */
	0x33c, 0x340, 0x5b0, 0x5c0, /* SODT0, SODT1, MODE_CTL, MODE */
	0x330, 0x334, 0x320,	    /* SDCKE0, SDCKE1, RESET */
};

static const u32 imx6sll_mmdc_io_offset[] __initconst = {
	0x294, 0x298, 0x29c, 0x2a0, /* DQM0 ~ DQM3 */
	0x544, 0x54c, 0x554, 0x558, /* GPR_B0DS ~ GPR_B3DS */
	0x530, 0x540, 0x2ac, 0x52c, /* MODE_CTL, MODE, SDCLK_0, GPR_ADDS */
	0x2a4, 0x2a8,		    /* SDCKE0, SDCKE1*/
};

static const u32 imx6sx_mmdc_io_offset[] __initconst = {
	0x2ec, 0x2f0, 0x2f4, 0x2f8, /* DQM0 ~ DQM3 */
	0x60c, 0x610, 0x61c, 0x620, /* GPR_B0DS ~ GPR_B3DS */
	0x300, 0x2fc, 0x32c, 0x5f4, /* CAS, RAS, SDCLK_0, GPR_ADDS */
	0x310, 0x314, 0x5f8, 0x608, /* SODT0, SODT1, MODE_CTL, MODE */
	0x330, 0x334, 0x338, 0x33c, /* SDQS0 ~ SDQS3 */
};

static const u32 imx6ul_mmdc_io_offset[] __initconst = {
	0x244, 0x248, 0x24c, 0x250, /* DQM0, DQM1, RAS, CAS */
	0x27c, 0x498, 0x4a4, 0x490, /* SDCLK0, GPR_B0DS-B1DS, GPR_ADDS */
	0x280, 0x284, 0x260, 0x264, /* SDQS0~1, SODT0, SODT1 */
	0x494, 0x4b0,		    /* MODE_CTL, MODE, */
};

static const struct imx6_pm_socdata imx6q_pm_data __initconst = {
	.mmdc_compat = "fsl,imx6q-mmdc",
	.src_compat = "fsl,imx6q-src",
	.iomuxc_compat = "fsl,imx6q-iomuxc",
	.gpc_compat = "fsl,imx6q-gpc",
	.pl310_compat = "arm,pl310-cache",
	.mmdc_io_num = ARRAY_SIZE(imx6q_mmdc_io_offset),
	.mmdc_io_offset = imx6q_mmdc_io_offset,
};

static const struct imx6_pm_socdata imx6dl_pm_data __initconst = {
	.mmdc_compat = "fsl,imx6q-mmdc",
	.src_compat = "fsl,imx6q-src",
	.iomuxc_compat = "fsl,imx6dl-iomuxc",
	.gpc_compat = "fsl,imx6q-gpc",
	.pl310_compat = "arm,pl310-cache",
	.mmdc_io_num = ARRAY_SIZE(imx6dl_mmdc_io_offset),
	.mmdc_io_offset = imx6dl_mmdc_io_offset,
};

static const struct imx6_pm_socdata imx6sl_pm_data __initconst = {
	.mmdc_compat = "fsl,imx6sl-mmdc",
	.src_compat = "fsl,imx6sl-src",
	.iomuxc_compat = "fsl,imx6sl-iomuxc",
	.gpc_compat = "fsl,imx6sl-gpc",
	.pl310_compat = "arm,pl310-cache",
	.mmdc_io_num = ARRAY_SIZE(imx6sl_mmdc_io_offset),
	.mmdc_io_offset = imx6sl_mmdc_io_offset,
};

static const struct imx6_pm_socdata imx6sll_pm_data __initconst = {
	.mmdc_compat = "fsl,imx6sll-mmdc",
	.src_compat = "fsl,imx6sll-src",
	.iomuxc_compat = "fsl,imx6sll-iomuxc",
	.gpc_compat = "fsl,imx6sll-gpc",
	.pl310_compat = "arm,pl310-cache",
	.mmdc_io_num = ARRAY_SIZE(imx6sll_mmdc_io_offset),
	.mmdc_io_offset = imx6sll_mmdc_io_offset,
};

static const struct imx6_pm_socdata imx6sx_pm_data __initconst = {
	.mmdc_compat = "fsl,imx6sx-mmdc",
	.src_compat = "fsl,imx6sx-src",
	.iomuxc_compat = "fsl,imx6sx-iomuxc",
	.gpc_compat = "fsl,imx6sx-gpc",
	.pl310_compat = "arm,pl310-cache",
	.mmdc_io_num = ARRAY_SIZE(imx6sx_mmdc_io_offset),
	.mmdc_io_offset = imx6sx_mmdc_io_offset,
};

static const struct imx6_pm_socdata imx6ul_pm_data __initconst = {
	.mmdc_compat = "fsl,imx6ul-mmdc",
	.src_compat = "fsl,imx6ul-src",
	.iomuxc_compat = "fsl,imx6ul-iomuxc",
	.gpc_compat = "fsl,imx6ul-gpc",
	.pl310_compat = NULL,
	.mmdc_io_num = ARRAY_SIZE(imx6ul_mmdc_io_offset),
	.mmdc_io_offset = imx6ul_mmdc_io_offset,
};

/*
 * This structure is for passing necessary data for low level ocram
 * suspend code(arch/arm/mach-imx/suspend-imx6.S), if this struct
 * definition is changed, the offset definition in
 * arch/arm/mach-imx/suspend-imx6.S must be also changed accordingly,
 * otherwise, the suspend to ocram function will be broken!
 */
struct imx6_cpu_pm_info {
	phys_addr_t pbase; /* The physical address of pm_info. */
	phys_addr_t resume_addr; /* The physical resume address for asm code */
	u32 ddr_type;
	u32 pm_info_size; /* Size of pm_info. */
	struct imx6_pm_base mmdc_base;
	struct imx6_pm_base src_base;
	struct imx6_pm_base iomuxc_base;
	struct imx6_pm_base ccm_base;
	struct imx6_pm_base gpc_base;
	struct imx6_pm_base l2_base;
	u32 mmdc_io_num; /* Number of MMDC IOs which need saved/restored. */
	u32 mmdc_io_val[MX6_MAX_MMDC_IO_NUM][2]; /* To save offset and value */
} __aligned(8);

/* Gate (or keep running) the internal memory clock in low power mode. */
void imx6_set_int_mem_clk_lpm(bool enable)
{
	u32 val = readl_relaxed(ccm_base + CGPR);

	val &= ~BM_CGPR_INT_MEM_CLK_LPM;
	if (enable)
		val |= BM_CGPR_INT_MEM_CLK_LPM;
	writel_relaxed(val, ccm_base + CGPR);
}

/* Enable/disable the "Really Bypass Clock" (RBC) for fast STOP-mode wakeup. */
void imx6_enable_rbc(bool enable)
{
	u32 val;

	/*
	 * need to mask all interrupts in GPC before
	 * operating RBC configurations
	 */
	imx_gpc_mask_all();

	/* configure RBC enable bit */
	val = readl_relaxed(ccm_base + CCR);
	val &= ~BM_CCR_RBC_EN;
	val |= enable ? BM_CCR_RBC_EN : 0;
	writel_relaxed(val, ccm_base + CCR);

	/* configure RBC count */
	val = readl_relaxed(ccm_base + CCR);
	val &= ~BM_CCR_RBC_BYPASS_COUNT;
	val |= enable ? BM_CCR_RBC_BYPASS_COUNT : 0;
	writel(val, ccm_base + CCR);

	/*
	 * need to delay at least 2 cycles of CKIL(32K)
	 * due to hardware design requirement, which is
	 * ~61us, here we use 65us for safe
	 */
	udelay(65);

	/* restore GPC interrupt mask settings */
	imx_gpc_restore_all();
}

/* Enable/disable well biasing of peripherals while in low power mode. */
static void imx6q_enable_wb(bool enable)
{
	u32 val;

	/* configure well bias enable bit */
	val = readl_relaxed(ccm_base + CLPCR);
	val &= ~BM_CLPCR_WB_PER_AT_LPM;
	val |= enable ? BM_CLPCR_WB_PER_AT_LPM : 0;
	writel_relaxed(val, ccm_base + CLPCR);

	/* configure well bias count */
	val = readl_relaxed(ccm_base + CCR);
	val &= ~BM_CCR_WB_COUNT;
	val |= enable ? BM_CCR_WB_COUNT : 0;
	writel_relaxed(val, ccm_base + CCR);
}

/*
 * Program CLPCR for the requested low-power mode (RUN/WAIT/STOP variants).
 * Returns 0 on success, -EINVAL for an unknown mode.
 */
int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
{
	u32 val = readl_relaxed(ccm_base + CLPCR);

	val &= ~BM_CLPCR_LPM;
	switch (mode) {
	case WAIT_CLOCKED:
		break;
	case WAIT_UNCLOCKED:
		val |= 0x1 << BP_CLPCR_LPM;
		val |= BM_CLPCR_ARM_CLK_DIS_ON_LPM;
		break;
	case STOP_POWER_ON:
		val |= 0x2 << BP_CLPCR_LPM;
		val &= ~BM_CLPCR_VSTBY;
		val &= ~BM_CLPCR_SBYOS;
		if (cpu_is_imx6sl())
			val |= BM_CLPCR_BYPASS_PMIC_READY;
		if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
		    cpu_is_imx6ull() || cpu_is_imx6sll() || cpu_is_imx6ulz())
			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
		else
			val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
		break;
	case WAIT_UNCLOCKED_POWER_OFF:
		val |= 0x1 << BP_CLPCR_LPM;
		val &= ~BM_CLPCR_VSTBY;
		val &= ~BM_CLPCR_SBYOS;
		break;
	case STOP_POWER_OFF:
		val |= 0x2 << BP_CLPCR_LPM;
		val |= 0x3 << BP_CLPCR_STBY_COUNT;
		val |= BM_CLPCR_VSTBY;
		val |= BM_CLPCR_SBYOS;
		if (cpu_is_imx6sl() || cpu_is_imx6sx())
			val |= BM_CLPCR_BYPASS_PMIC_READY;
		if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
		    cpu_is_imx6ull() || cpu_is_imx6sll() || cpu_is_imx6ulz())
			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
		else
			val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * ERR007265: CCM: When improper low-power sequence is used,
	 * the SoC enters low power mode before the ARM core executes WFI.
	 *
	 * Software workaround:
	 * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
	 *    by setting IOMUX_GPR1_GINT.
	 * 2) Software should then unmask IRQ #32 in GPC before setting CCM
	 *    Low-Power mode.
	 * 3) Software should mask IRQ #32 right after CCM Low-Power mode
	 *    is set (set bits 0-1 of CCM_CLPCR).
	 *
	 * Note that IRQ #32 is GIC SPI #0.
	 */
	if (mode != WAIT_CLOCKED)
		imx_gpc_hwirq_unmask(0);
	writel_relaxed(val, ccm_base + CLPCR);
	if (mode != WAIT_CLOCKED)
		imx_gpc_hwirq_mask(0);

	return 0;
}

/* cpu_suspend() finisher: either plain WFI or the relocated ocram asm path. */
static int imx6q_suspend_finish(unsigned long val)
{
	if (!imx6_suspend_in_ocram_fn) {
		cpu_do_idle();
	} else {
		/*
		 * call low level suspend function in ocram,
		 * as we need to float DDR IO.
		 */
		local_flush_tlb_all();
		/* check if need to flush internal L2 cache */
		if (!((struct imx6_cpu_pm_info *)
			suspend_ocram_base)->l2_base.vbase)
			flush_cache_all();
		imx6_suspend_in_ocram_fn(suspend_ocram_base);
	}

	return 0;
}

/*
 * platform_suspend_ops.enter: standby maps to STOP_POWER_ON + WFI,
 * mem maps to STOP_POWER_OFF via cpu_suspend()/ocram code.
 */
static int imx6q_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		imx6_set_lpm(STOP_POWER_ON);
		imx6_set_int_mem_clk_lpm(true);
		imx_gpc_pre_suspend(false);
		if (cpu_is_imx6sl())
			imx6sl_set_wait_clk(true);
		/* Zzz ... */
		cpu_do_idle();
		if (cpu_is_imx6sl())
			imx6sl_set_wait_clk(false);
		imx_gpc_post_resume();
		imx6_set_lpm(WAIT_CLOCKED);
		break;
	case PM_SUSPEND_MEM:
		imx6_set_lpm(STOP_POWER_OFF);
		imx6_set_int_mem_clk_lpm(false);
		imx6q_enable_wb(true);
		/*
		 * For suspend into ocram, asm code already take care of
		 * RBC setting, so we do NOT need to do that here.
		 */
		if (!imx6_suspend_in_ocram_fn)
			imx6_enable_rbc(true);
		imx_gpc_pre_suspend(true);
		imx_anatop_pre_suspend();
		/* Zzz ... */
		cpu_suspend(0, imx6q_suspend_finish);
		if (cpu_is_imx6q() || cpu_is_imx6dl())
			imx_smp_prepare();
		imx_anatop_post_resume();
		imx_gpc_post_resume();
		imx6_enable_rbc(false);
		imx6q_enable_wb(false);
		imx6_set_int_mem_clk_lpm(true);
		imx6_set_lpm(WAIT_CLOCKED);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int imx6q_pm_valid(suspend_state_t state)
{
	return (state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM);
}

static const struct platform_suspend_ops imx6q_pm_ops = {
	.enter = imx6q_pm_enter,
	.valid = imx6q_pm_valid,
};

/*
 * Look up a node by compatible and fill in its first register window's
 * physical base and an ioremap'ed virtual base. Returns 0 or -errno.
 */
static int __init imx6_pm_get_base(struct imx6_pm_base *base,
				const char *compat)
{
	struct device_node *node;
	struct resource res;
	int ret = 0;

	node = of_find_compatible_node(NULL, NULL, compat);
	if (!node)
		return -ENODEV;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		goto put_node;

	base->pbase = res.start;
	base->vbase = ioremap(res.start, resource_size(&res));
	if (!base->vbase)
		ret = -ENOMEM;

put_node:
	of_node_put(node);

	return ret;
}

/*
 * Set up suspend-to-ocram: allocate ocram, build the imx6_cpu_pm_info
 * parameter block, snapshot the MMDC IOMUX pad values and copy the
 * low-level suspend asm code (imx6_suspend) into ocram after it.
 */
static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
{
	phys_addr_t ocram_pbase;
	struct device_node *node;
	struct platform_device *pdev;
	struct imx6_cpu_pm_info *pm_info;
	struct gen_pool *ocram_pool;
	unsigned long ocram_base;
	int i, ret = 0;
	const u32 *mmdc_offset_array;

	suspend_set_ops(&imx6q_pm_ops);

	if (!socdata) {
		pr_warn("%s: invalid argument!\n", __func__);
		return -EINVAL;
	}

	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!node) {
		pr_warn("%s: failed to find ocram node!\n", __func__);
		return -ENODEV;
	}

	pdev = of_find_device_by_node(node);
	if (!pdev) {
		pr_warn("%s: failed to find ocram device!\n", __func__);
		ret = -ENODEV;
		goto put_node;
	}

	ocram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!ocram_pool) {
		pr_warn("%s: ocram pool unavailable!\n", __func__);
		ret = -ENODEV;
		goto put_device;
	}

	ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
	if (!ocram_base) {
		pr_warn("%s: unable to alloc ocram!\n", __func__);
		ret = -ENOMEM;
		goto put_device;
	}

	ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);

	suspend_ocram_base = __arm_ioremap_exec(ocram_pbase,
		MX6Q_SUSPEND_OCRAM_SIZE, false);

	memset(suspend_ocram_base, 0, sizeof(*pm_info));
	pm_info = suspend_ocram_base;
	pm_info->pbase = ocram_pbase;
	pm_info->resume_addr = __pa_symbol(v7_cpu_resume);
	pm_info->pm_info_size = sizeof(*pm_info);

	/*
	 * ccm physical address is not used by asm code currently,
	 * so get ccm virtual address directly.
	 */
	pm_info->ccm_base.vbase = ccm_base;

	ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
	if (ret) {
		pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
		goto put_device;
	}

	ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
	if (ret) {
		pr_warn("%s: failed to get src base %d!\n", __func__, ret);
		goto src_map_failed;
	}

	ret = imx6_pm_get_base(&pm_info->iomuxc_base, socdata->iomuxc_compat);
	if (ret) {
		pr_warn("%s: failed to get iomuxc base %d!\n", __func__, ret);
		goto iomuxc_map_failed;
	}

	ret = imx6_pm_get_base(&pm_info->gpc_base, socdata->gpc_compat);
	if (ret) {
		pr_warn("%s: failed to get gpc base %d!\n", __func__, ret);
		goto gpc_map_failed;
	}

	if (socdata->pl310_compat) {
		ret = imx6_pm_get_base(&pm_info->l2_base, socdata->pl310_compat);
		if (ret) {
			pr_warn("%s: failed to get pl310-cache base %d!\n",
				__func__, ret);
			goto pl310_cache_map_failed;
		}
	}

	pm_info->ddr_type = imx_mmdc_get_ddr_type();
	pm_info->mmdc_io_num = socdata->mmdc_io_num;
	mmdc_offset_array = socdata->mmdc_io_offset;

	/* Save the IOMUXC pad settings the asm code will restore on resume. */
	for (i = 0; i < pm_info->mmdc_io_num; i++) {
		pm_info->mmdc_io_val[i][0] =
			mmdc_offset_array[i];
		pm_info->mmdc_io_val[i][1] =
			readl_relaxed(pm_info->iomuxc_base.vbase +
			mmdc_offset_array[i]);
	}

	imx6_suspend_in_ocram_fn = fncpy(
		suspend_ocram_base + sizeof(*pm_info),
		&imx6_suspend,
		MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));

	__arm_iomem_set_ro(suspend_ocram_base, MX6Q_SUSPEND_OCRAM_SIZE);

	goto put_device;

pl310_cache_map_failed:
	iounmap(pm_info->gpc_base.vbase);
gpc_map_failed:
	iounmap(pm_info->iomuxc_base.vbase);
iomuxc_map_failed:
	iounmap(pm_info->src_base.vbase);
src_map_failed:
	iounmap(pm_info->mmdc_base.vbase);
put_device:
	put_device(&pdev->dev);
put_node:
	of_node_put(node);

	return ret;
}

/* Common PM setup: suspend-to-ocram plus the ERR007265 GINT workaround. */
static void __init imx6_pm_common_init(const struct imx6_pm_socdata
					*socdata)
{
	struct regmap *gpr;
	int ret;

	WARN_ON(!ccm_base);

	if (IS_ENABLED(CONFIG_SUSPEND)) {
		ret = imx6q_suspend_init(socdata);
		if (ret)
			pr_warn("%s: No DDR LPM support with suspend %d!\n",
				__func__, ret);
	}

	/*
	 * This is for SW workaround step #1 of ERR007265, see comments
	 * in imx6_set_lpm for details of this errata.
	 * Force IOMUXC irq pending, so that the interrupt to GPC can be
	 * used to deassert dsm_request signal when the signal gets
	 * asserted unexpectedly.
	 */
	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (!IS_ERR(gpr))
		regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
				   IMX6Q_GPR1_GINT);
}

/* pm_power_off hook: enter STOP_POWER_OFF and let the PMIC cut power. */
static void imx6_pm_stby_poweroff(void)
{
	gic_cpu_if_down(0);
	imx6_set_lpm(STOP_POWER_OFF);
	imx6q_suspend_finish(0);

	mdelay(1000);

	pr_emerg("Unable to poweroff system\n");
}

static int imx6_pm_stby_poweroff_probe(void)
{
	if (pm_power_off) {
		pr_warn("%s: pm_power_off already claimed  %p %ps!\n",
			__func__, pm_power_off, pm_power_off);
		return -EBUSY;
	}

	pm_power_off = imx6_pm_stby_poweroff;
	return 0;
}

/* Map the CCM early and force CLPCR into RUN mode before SMP/idle starts. */
void __init imx6_pm_ccm_init(const char *ccm_compat)
{
	struct device_node *np;
	u32 val;

	np = of_find_compatible_node(NULL, NULL, ccm_compat);
	ccm_base = of_iomap(np, 0);
	BUG_ON(!ccm_base);

	/*
	 * Initialize CCM_CLPCR_LPM into RUN mode to avoid ARM core
	 * clock being shut down unexpectedly by WAIT mode.
	 */
	val = readl_relaxed(ccm_base + CLPCR);
	val &= ~BM_CLPCR_LPM;
	writel_relaxed(val, ccm_base + CLPCR);

	if (of_property_read_bool(np, "fsl,pmic-stby-poweroff"))
		imx6_pm_stby_poweroff_probe();
	of_node_put(np);
}

void __init imx6q_pm_init(void)
{
	imx6_pm_common_init(&imx6q_pm_data);
}

void __init imx6dl_pm_init(void)
{
	imx6_pm_common_init(&imx6dl_pm_data);
}

/* Shared entry for i.MX6SL and i.MX6SLL; picks socdata by running CPU. */
void __init imx6sl_pm_init(void)
{
	struct regmap *gpr;

	if (cpu_is_imx6sl()) {
		imx6_pm_common_init(&imx6sl_pm_data);
	} else {
		imx6_pm_common_init(&imx6sll_pm_data);
		gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
		if (!IS_ERR(gpr))
			regmap_update_bits(gpr, IOMUXC_GPR5,
				IMX6SLL_GPR5_AFCG_X_BYPASS_MASK, 0);
	}
}

void __init imx6sx_pm_init(void)
{
	imx6_pm_common_init(&imx6sx_pm_data);
}

void __init imx6ul_pm_init(void)
{
	imx6_pm_common_init(&imx6ul_pm_data);
}
linux-master
arch/arm/mach-imx/pm-imx6.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2011 Linaro Ltd. */ #include <asm/mach/arch.h> #include "common.h" #include "hardware.h" static void __init imx53_init_early(void) { mxc_set_cpu_type(MXC_CPU_MX53); } static void __init imx53_dt_init(void) { imx_src_init(); imx5_pmu_init(); imx_aips_allow_unprivileged_access("fsl,imx53-aipstz"); } static void __init imx53_init_late(void) { imx53_pm_init(); } static const char * const imx53_dt_board_compat[] __initconst = { "fsl,imx53", NULL }; DT_MACHINE_START(IMX53_DT, "Freescale i.MX53 (Device Tree Support)") .init_early = imx53_init_early, .init_machine = imx53_dt_init, .init_late = imx53_init_late, .dt_compat = imx53_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx53.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2013 Freescale Semiconductor, Inc. */ #include <linux/irqchip.h> #include <linux/of_platform.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/regmap.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "common.h" #include "cpuidle.h" #include "hardware.h" static void __init imx6sl_fec_init(void) { struct regmap *gpr; /* set FEC clock from internal PLL clock source */ gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sl-iomuxc-gpr"); if (!IS_ERR(gpr)) { regmap_update_bits(gpr, IOMUXC_GPR1, IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK, 0); regmap_update_bits(gpr, IOMUXC_GPR1, IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK, 0); } else { pr_err("failed to find fsl,imx6sl-iomux-gpr regmap\n"); } } static void __init imx6sl_init_late(void) { /* imx6sl reuses imx6q cpufreq driver */ if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0); if (IS_ENABLED(CONFIG_SOC_IMX6SL) && cpu_is_imx6sl()) imx6sl_cpuidle_init(); else if (IS_ENABLED(CONFIG_SOC_IMX6SLL)) imx6sx_cpuidle_init(); } static void __init imx6sl_init_machine(void) { of_platform_default_populate(NULL, NULL, NULL); if (cpu_is_imx6sl()) imx6sl_fec_init(); imx_anatop_init(); imx6sl_pm_init(); } static void __init imx6sl_init_irq(void) { imx_gpc_check_dt(); imx_init_revision_from_anatop(); imx_init_l2cache(); imx_src_init(); irqchip_init(); if (cpu_is_imx6sl()) imx6_pm_ccm_init("fsl,imx6sl-ccm"); else imx6_pm_ccm_init("fsl,imx6sll-ccm"); } static const char * const imx6sl_dt_compat[] __initconst = { "fsl,imx6sl", "fsl,imx6sll", NULL, }; DT_MACHINE_START(IMX6SL, "Freescale i.MX6 SoloLite (Device Tree)") .l2c_aux_val = 0, .l2c_aux_mask = ~0, .init_irq = imx6sl_init_irq, .init_machine = imx6sl_init_machine, .init_late = imx6sl_init_late, .dt_compat = imx6sl_dt_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx6sl.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016 NXP Semiconductors */ #include <linux/kernel.h> #include <linux/suspend.h> #include <linux/io.h> #include "common.h" static int imx25_suspend_enter(suspend_state_t state) { if (!IS_ENABLED(CONFIG_PM)) return 0; switch (state) { case PM_SUSPEND_MEM: cpu_do_idle(); break; default: return -EINVAL; } return 0; } static const struct platform_suspend_ops imx25_suspend_ops = { .enter = imx25_suspend_enter, .valid = suspend_valid_only_mem, }; void __init imx25_pm_init(void) { suspend_set_ops(&imx25_suspend_ops); }
linux-master
arch/arm/mach-imx/pm-imx25.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/irqchip.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <linux/micrel_phy.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "common.h"
#include "cpuidle.h"
#include "hardware.h"

/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
static int ksz9021rn_phy_fixup(struct phy_device *phydev)
{
	if (IS_BUILTIN(CONFIG_PHYLIB)) {
		/* min rx data delay */
		phy_write(phydev, MICREL_KSZ9021_EXTREG_CTRL,
			0x8000 | MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW);
		phy_write(phydev, MICREL_KSZ9021_EXTREG_DATA_WRITE, 0x0000);

		/* max rx/tx clock delay, min rx/tx control delay */
		phy_write(phydev, MICREL_KSZ9021_EXTREG_CTRL,
			0x8000 | MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW);
		phy_write(phydev, MICREL_KSZ9021_EXTREG_DATA_WRITE, 0xf0f0);
		phy_write(phydev, MICREL_KSZ9021_EXTREG_CTRL,
			MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW);
	}

	return 0;
}

/*
 * fixup for PLX PEX8909 bridge to configure GPIO1-7 as output High
 * as they are used for slots1-7 PERST#
 */
static void ventana_pciesw_early_fixup(struct pci_dev *dev)
{
	u32 dw;

	if (!of_machine_is_compatible("gw,ventana"))
		return;

	if (dev->devfn != 0)
		return;

	pci_read_config_dword(dev, 0x62c, &dw);
	dw |= 0xaaa8; // GPIO1-7 outputs
	pci_write_config_dword(dev, 0x62c, dw);

	pci_read_config_dword(dev, 0x644, &dw);
	dw |= 0xfe;   // GPIO1-7 output high
	pci_write_config_dword(dev, 0x644, dw);

	/* give the PERST# lines time to settle before link training */
	msleep(100);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8609, ventana_pciesw_early_fixup);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8606, ventana_pciesw_early_fixup);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8604, ventana_pciesw_early_fixup);

/* Register the KSZ9021 fixup when phylib is built in. */
static void __init imx6q_enet_phy_init(void)
{
	if (IS_BUILTIN(CONFIG_PHYLIB)) {
		phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
				ksz9021rn_phy_fixup);
	}
}

/* Select the IEEE 1588 (PTP) reference clock source via IOMUXC GPR1. */
static void __init imx6q_1588_init(void)
{
	struct device_node *np;
	struct clk *ptp_clk, *fec_enet_ref;
	struct clk *enet_ref;
	struct regmap *gpr;
	u32 clksel;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-fec");
	if (!np) {
		pr_warn("%s: failed to find fec node\n", __func__);
		return;
	}

	/*
	 * If enet_clk_ref is configured, we assume DT did it properly and
	 * clk-imx6q.c will do the needed configuration.
	 */
	fec_enet_ref = of_clk_get_by_name(np, "enet_clk_ref");
	if (!IS_ERR(fec_enet_ref))
		goto put_node;

	ptp_clk = of_clk_get(np, 2);
	if (IS_ERR(ptp_clk)) {
		pr_warn("%s: failed to get ptp clock\n", __func__);
		goto put_node;
	}

	enet_ref = clk_get_sys(NULL, "enet_ref");
	if (IS_ERR(enet_ref)) {
		pr_warn("%s: failed to get enet clock\n", __func__);
		goto put_ptp_clk;
	}

	/*
	 * If enet_ref from ANATOP/CCM is the PTP clock source, we need to
	 * set bit IOMUXC_GPR1[21].  Or the PTP clock must be from pad
	 * (external OSC), and we need to clear the bit.
	 */
	clksel = clk_is_match(ptp_clk, enet_ref) ?
		IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
		IMX6Q_GPR1_ENET_CLK_SEL_PAD;
	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (!IS_ERR(gpr))
		regmap_update_bits(gpr, IOMUXC_GPR1,
				IMX6Q_GPR1_ENET_CLK_SEL_MASK,
				clksel);
	else
		pr_err("failed to find fsl,imx6q-iomuxc-gpr regmap\n");

	clk_put(enet_ref);
put_ptp_clk:
	clk_put(ptp_clk);
put_node:
	of_node_put(np);
}

/* Tune AXI cacheability and IPU QoS via IOMUXC GPR4/6/7. */
static void __init imx6q_axi_init(void)
{
	struct regmap *gpr;
	unsigned int mask;

	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (!IS_ERR(gpr)) {
		/*
		 * Enable the cacheable attribute of VPU and IPU
		 * AXI transactions.
		 */
		mask = IMX6Q_GPR4_VPU_WR_CACHE_SEL |
			IMX6Q_GPR4_VPU_RD_CACHE_SEL |
			IMX6Q_GPR4_VPU_P_WR_CACHE_VAL |
			IMX6Q_GPR4_VPU_P_RD_CACHE_VAL_MASK |
			IMX6Q_GPR4_IPU_WR_CACHE_CTL |
			IMX6Q_GPR4_IPU_RD_CACHE_CTL;
		regmap_update_bits(gpr, IOMUXC_GPR4, mask, mask);

		/* Increase IPU read QoS priority */
		regmap_update_bits(gpr, IOMUXC_GPR6,
				IMX6Q_GPR6_IPU1_ID00_RD_QOS_MASK |
				IMX6Q_GPR6_IPU1_ID01_RD_QOS_MASK,
				(0xf << 16) | (0x7 << 20));
		regmap_update_bits(gpr, IOMUXC_GPR7,
				IMX6Q_GPR7_IPU2_ID00_RD_QOS_MASK |
				IMX6Q_GPR7_IPU2_ID01_RD_QOS_MASK,
				(0xf << 16) | (0x7 << 20));
	} else {
		pr_warn("failed to find fsl,imx6q-iomuxc-gpr regmap\n");
	}
}

static void __init imx6q_init_machine(void)
{
	if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
		/*
		 * SoCs that identify as i.MX6Q >= rev 2.0 are really i.MX6QP.
		 * Quirk: i.MX6QP revision = i.MX6Q revision - (1, 0),
		 * e.g. i.MX6QP rev 1.1 identifies as i.MX6Q rev 2.1.
		 */
		imx_print_silicon_rev("i.MX6QP", imx_get_soc_revision() - 0x10);
	else
		imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q",
				imx_get_soc_revision());

	imx6q_enet_phy_init();

	of_platform_default_populate(NULL, NULL, NULL);

	imx_anatop_init();
	cpu_is_imx6q() ?  imx6q_pm_init() : imx6dl_pm_init();
	imx6q_1588_init();
	imx6q_axi_init();
}

static void __init imx6q_init_late(void)
{
	/*
	 * WAIT mode is broken on imx6 Dual/Quad revision 1.0 and 1.1 so
	 * there is no point to run cpuidle on them.
	 *
	 * It does work on imx6 Solo/DualLite starting from 1.1
	 */
	if ((cpu_is_imx6q() && imx_get_soc_revision() > IMX_CHIP_REVISION_1_1) ||
	    (cpu_is_imx6dl() && imx_get_soc_revision() > IMX_CHIP_REVISION_1_0))
		imx6q_cpuidle_init();

	if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
		platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
}

static void __init imx6q_map_io(void)
{
	debug_ll_io_init();
	imx_scu_map_io();
}

static void __init imx6q_init_irq(void)
{
	imx_gpc_check_dt();
	imx_init_revision_from_anatop();
	imx_init_l2cache();
	imx_src_init();
	irqchip_init();
	imx6_pm_ccm_init("fsl,imx6q-ccm");
}

static const char * const imx6q_dt_compat[] __initconst = {
	"fsl,imx6dl",
	"fsl,imx6q",
	"fsl,imx6qp",
	NULL,
};

DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad/DualLite (Device Tree)")
	.l2c_aux_val 	= 0,
	.l2c_aux_mask	= ~0,
	.smp		= smp_ops(imx_smp_ops),
	.map_io		= imx6q_map_io,
	.init_irq	= imx6q_init_irq,
	.init_machine	= imx6q_init_machine,
	.init_late      = imx6q_init_late,
	.dt_compat	= imx6q_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx6q.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012-2013 Freescale Semiconductor, Inc. */ #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/io.h> #include <linux/irqchip.h> #include <asm/mach/arch.h> #include <asm/hardware/cache-l2x0.h> #include "common.h" #include "hardware.h" #define MSCM_CPxCOUNT 0x00c #define MSCM_CPxCFG1 0x014 static void __init vf610_detect_cpu(void) { struct device_node *np; u32 cpxcount, cpxcfg1; unsigned int cpu_type; void __iomem *mscm; np = of_find_compatible_node(NULL, NULL, "fsl,vf610-mscm-cpucfg"); if (WARN_ON(!np)) return; mscm = of_iomap(np, 0); of_node_put(np); if (WARN_ON(!mscm)) return; cpxcount = readl_relaxed(mscm + MSCM_CPxCOUNT); cpxcfg1 = readl_relaxed(mscm + MSCM_CPxCFG1); iounmap(mscm); cpu_type = cpxcount ? MXC_CPU_VF600 : MXC_CPU_VF500; if (cpxcfg1) cpu_type |= MXC_CPU_VFx10; mxc_set_cpu_type(cpu_type); } static void __init vf610_init_machine(void) { vf610_detect_cpu(); of_platform_default_populate(NULL, NULL, NULL); } static const char * const vf610_dt_compat[] __initconst = { "fsl,vf500", "fsl,vf510", "fsl,vf600", "fsl,vf610", "fsl,vf610m4", NULL, }; DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF5xx/VF6xx (Device Tree)") .l2c_aux_val = 0, .l2c_aux_mask = ~0, .init_machine = vf610_init_machine, .dt_compat = vf610_dt_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-vf610.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 1999,2000 Arm Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd
 * Copyright (C) 2002 Shane Nay ([email protected])
 * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 *	- add MX31 specific definitions
 *
 * Static memory mapping and low-power idle support for i.MX31/i.MX35.
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/system_misc.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>

#include "common.h"
#include "crmregs-imx3.h"
#include "hardware.h"

/* Mapped CCM base, shared by the imx31/imx35 idle and clock code */
void __iomem *mx3_ccm_base;

/*
 * Core WFI sequence for imx3: caches are disabled and flushed around
 * the WFI so the core can be safely gated.  The nops pad the pipeline
 * after wake-up before caches are re-enabled.
 */
static void imx3_idle(void)
{
	unsigned long reg = 0;

	__asm__ __volatile__(
		/* disable I and D cache */
		"mrc p15, 0, %0, c1, c0, 0\n"
		"bic %0, %0, #0x00001000\n"
		"bic %0, %0, #0x00000004\n"
		"mcr p15, 0, %0, c1, c0, 0\n"
		/* invalidate I cache */
		"mov %0, #0\n"
		"mcr p15, 0, %0, c7, c5, 0\n"
		/* clear and invalidate D cache */
		"mov %0, #0\n"
		"mcr p15, 0, %0, c7, c14, 0\n"
		/* WFI */
		"mov %0, #0\n"
		"mcr p15, 0, %0, c7, c0, 4\n"
		"nop\n" "nop\n" "nop\n" "nop\n"
		"nop\n" "nop\n" "nop\n"
		/* enable I and D cache */
		"mrc p15, 0, %0, c1, c0, 0\n"
		"orr %0, %0, #0x00001000\n"
		"orr %0, %0, #0x00000004\n"
		"mcr p15, 0, %0, c1, c0, 0\n"
		: "=r" (reg));
}

/*
 * ioremap hook: force MT_DEVICE_NONSHARED for peripherals to work
 * around cache corruption, except for the L2 cache controller itself.
 */
static void __iomem *imx3_ioremap_caller(phys_addr_t phys_addr, size_t size,
					 unsigned int mtype, void *caller)
{
	if (mtype == MT_DEVICE) {
		/*
		 * Access all peripherals below 0x80000000 as nonshared device
		 * on mx3, but leave l2cc alone.  Otherwise cache corruptions
		 * can occur.
		 */
		if (phys_addr < 0x80000000 &&
				!addr_in_module(phys_addr, MX3x_L2CC))
			mtype = MT_DEVICE_NONSHARED;
	}

	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

#ifdef CONFIG_SOC_IMX31
static struct map_desc mx31_io_desc[] __initdata = {
	imx_map_entry(MX31, X_MEMC, MT_DEVICE),
	imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
	imx_map_entry(MX31, AIPS1, MT_DEVICE_NONSHARED),
	imx_map_entry(MX31, AIPS2, MT_DEVICE_NONSHARED),
	imx_map_entry(MX31, SPBA0, MT_DEVICE_NONSHARED),
};

/*
 * This function initializes the memory map. It is called during the
 * system startup to create static physical to virtual memory mappings
 * for the IO modules.
 */
void __init mx31_map_io(void)
{
	iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
}

/* Clear the CCM low-power mode field, then run the common WFI sequence */
static void imx31_idle(void)
{
	int reg = imx_readl(mx3_ccm_base + MXC_CCM_CCMR);
	reg &= ~MXC_CCM_CCMR_LPM_MASK;
	imx_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);

	imx3_idle();
}

/*
 * Early init for i.MX31: set CPU type, install the ioremap workaround
 * and the idle hook, and map the CCM (required -- BUG if absent).
 */
void __init imx31_init_early(void)
{
	struct device_node *np;

	mxc_set_cpu_type(MXC_CPU_MX31);
	arch_ioremap_caller = imx3_ioremap_caller;
	arm_pm_idle = imx31_idle;
	np = of_find_compatible_node(NULL, NULL, "fsl,imx31-ccm");
	mx3_ccm_base = of_iomap(np, 0);
	BUG_ON(!mx3_ccm_base);
}
#endif /* ifdef CONFIG_SOC_IMX31 */

#ifdef CONFIG_SOC_IMX35
static struct map_desc mx35_io_desc[] __initdata = {
	imx_map_entry(MX35, X_MEMC, MT_DEVICE),
	imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
	imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
	imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
	imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
};

void __init mx35_map_io(void)
{
	iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
}

/* Like imx31_idle(), but selects the mx35-specific WAIT low-power mode */
static void imx35_idle(void)
{
	int reg = imx_readl(mx3_ccm_base + MXC_CCM_CCMR);
	reg &= ~MXC_CCM_CCMR_LPM_MASK;
	reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
	imx_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);

	imx3_idle();
}

/*
 * Early init for i.MX35: mirror of imx31_init_early() for the mx35
 * CCM compatible string and idle routine.
 */
void __init imx35_init_early(void)
{
	struct device_node *np;

	mxc_set_cpu_type(MXC_CPU_MX35);
	arm_pm_idle = imx35_idle;
	arch_ioremap_caller = imx3_ioremap_caller;
	np = of_find_compatible_node(NULL, NULL, "fsl,imx35-ccm");
	mx3_ccm_base = of_iomap(np, 0);
	BUG_ON(!mx3_ccm_base);
}
#endif /* ifdef CONFIG_SOC_IMX35 */
linux-master
arch/arm/mach-imx/mm-imx3.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2017 NXP * Copyright 2011,2016 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. */ #include <linux/clk.h> #include <linux/hrtimer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/perf_event.h> #include <linux/slab.h> #include "common.h" #define MMDC_MAPSR 0x404 #define BP_MMDC_MAPSR_PSD 0 #define BP_MMDC_MAPSR_PSS 4 #define MMDC_MDMISC 0x18 #define BM_MMDC_MDMISC_DDR_TYPE 0x18 #define BP_MMDC_MDMISC_DDR_TYPE 0x3 #define TOTAL_CYCLES 0x0 #define BUSY_CYCLES 0x1 #define READ_ACCESSES 0x2 #define WRITE_ACCESSES 0x3 #define READ_BYTES 0x4 #define WRITE_BYTES 0x5 /* Enables, resets, freezes, overflow profiling*/ #define DBG_DIS 0x0 #define DBG_EN 0x1 #define DBG_RST 0x2 #define PRF_FRZ 0x4 #define CYC_OVF 0x8 #define PROFILE_SEL 0x10 #define MMDC_MADPCR0 0x410 #define MMDC_MADPCR1 0x414 #define MMDC_MADPSR0 0x418 #define MMDC_MADPSR1 0x41C #define MMDC_MADPSR2 0x420 #define MMDC_MADPSR3 0x424 #define MMDC_MADPSR4 0x428 #define MMDC_MADPSR5 0x42C #define MMDC_NUM_COUNTERS 6 #define MMDC_FLAG_PROFILE_SEL 0x1 #define MMDC_PRF_AXI_ID_CLEAR 0x0 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu) static int ddr_type; struct fsl_mmdc_devtype_data { unsigned int flags; }; static const struct fsl_mmdc_devtype_data imx6q_data = { }; static const struct fsl_mmdc_devtype_data imx6qp_data = { .flags = MMDC_FLAG_PROFILE_SEL, }; static const struct of_device_id imx_mmdc_dt_ids[] = { { .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data}, { .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data}, { /* sentinel */ } }; #ifdef CONFIG_PERF_EVENTS static enum cpuhp_state cpuhp_mmdc_state; static DEFINE_IDA(mmdc_ida); PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00") PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, 
"event=0x01") PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02") PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03") PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04") PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB"); PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001"); PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05") PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB"); PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001"); struct mmdc_pmu { struct pmu pmu; void __iomem *mmdc_base; cpumask_t cpu; struct hrtimer hrtimer; unsigned int active_events; int id; struct device *dev; struct perf_event *mmdc_events[MMDC_NUM_COUNTERS]; struct hlist_node node; struct fsl_mmdc_devtype_data *devtype_data; struct clk *mmdc_ipg_clk; }; /* * Polling period is set to one second, overflow of total-cycles (the fastest * increasing counter) takes ten seconds so one second is safe */ static unsigned int mmdc_pmu_poll_period_us = 1000000; module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint, S_IRUGO | S_IWUSR); static ktime_t mmdc_pmu_timer_period(void) { return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000); } static ssize_t mmdc_pmu_cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev); return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu); } static struct device_attribute mmdc_pmu_cpumask_attr = __ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL); static struct attribute *mmdc_pmu_cpumask_attrs[] = { &mmdc_pmu_cpumask_attr.attr, NULL, }; static struct attribute_group mmdc_pmu_cpumask_attr_group = { .attrs = mmdc_pmu_cpumask_attrs, }; static struct attribute *mmdc_pmu_events_attrs[] = { &mmdc_pmu_total_cycles.attr.attr, &mmdc_pmu_busy_cycles.attr.attr, &mmdc_pmu_read_accesses.attr.attr, 
&mmdc_pmu_write_accesses.attr.attr, &mmdc_pmu_read_bytes.attr.attr, &mmdc_pmu_read_bytes_unit.attr.attr, &mmdc_pmu_read_bytes_scale.attr.attr, &mmdc_pmu_write_bytes.attr.attr, &mmdc_pmu_write_bytes_unit.attr.attr, &mmdc_pmu_write_bytes_scale.attr.attr, NULL, }; static struct attribute_group mmdc_pmu_events_attr_group = { .name = "events", .attrs = mmdc_pmu_events_attrs, }; PMU_FORMAT_ATTR(event, "config:0-63"); PMU_FORMAT_ATTR(axi_id, "config1:0-63"); static struct attribute *mmdc_pmu_format_attrs[] = { &format_attr_event.attr, &format_attr_axi_id.attr, NULL, }; static struct attribute_group mmdc_pmu_format_attr_group = { .name = "format", .attrs = mmdc_pmu_format_attrs, }; static const struct attribute_group *attr_groups[] = { &mmdc_pmu_events_attr_group, &mmdc_pmu_format_attr_group, &mmdc_pmu_cpumask_attr_group, NULL, }; static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg) { void __iomem *mmdc_base, *reg; mmdc_base = pmu_mmdc->mmdc_base; switch (cfg) { case TOTAL_CYCLES: reg = mmdc_base + MMDC_MADPSR0; break; case BUSY_CYCLES: reg = mmdc_base + MMDC_MADPSR1; break; case READ_ACCESSES: reg = mmdc_base + MMDC_MADPSR2; break; case WRITE_ACCESSES: reg = mmdc_base + MMDC_MADPSR3; break; case READ_BYTES: reg = mmdc_base + MMDC_MADPSR4; break; case WRITE_BYTES: reg = mmdc_base + MMDC_MADPSR5; break; default: return WARN_ONCE(1, "invalid configuration %d for mmdc counter", cfg); } return readl(reg); } static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) { struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node); int target; if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu)) return 0; target = cpumask_any_but(cpu_online_mask, cpu); if (target >= nr_cpu_ids) return 0; perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target); cpumask_set_cpu(target, &pmu_mmdc->cpu); return 0; } static bool mmdc_pmu_group_event_is_valid(struct perf_event *event, struct pmu *pmu, unsigned long *used_counters) { int cfg = 
event->attr.config; if (is_software_event(event)) return true; if (event->pmu != pmu) return false; return !test_and_set_bit(cfg, used_counters); } /* * Each event has a single fixed-purpose counter, so we can only have a * single active event for each at any point in time. Here we just check * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW * event numbers are valid. */ static bool mmdc_pmu_group_is_valid(struct perf_event *event) { struct pmu *pmu = event->pmu; struct perf_event *leader = event->group_leader; struct perf_event *sibling; unsigned long counter_mask = 0; set_bit(leader->attr.config, &counter_mask); if (event != leader) { if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask)) return false; } for_each_sibling_event(sibling, leader) { if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask)) return false; } return true; } static int mmdc_pmu_event_init(struct perf_event *event) { struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); int cfg = event->attr.config; if (event->attr.type != event->pmu->type) return -ENOENT; if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EOPNOTSUPP; if (event->cpu < 0) { dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n"); return -EOPNOTSUPP; } if (event->attr.sample_period) return -EINVAL; if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS) return -EINVAL; if (!mmdc_pmu_group_is_valid(event)) return -EINVAL; event->cpu = cpumask_first(&pmu_mmdc->cpu); return 0; } static void mmdc_pmu_event_update(struct perf_event *event) { struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; u64 delta, prev_raw_count, new_raw_count; do { prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = mmdc_pmu_read_counter(pmu_mmdc, event->attr.config); } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count); delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF; local64_add(delta, &event->count); } 
static void mmdc_pmu_event_start(struct perf_event *event, int flags) { struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; void __iomem *mmdc_base, *reg; u32 val; mmdc_base = pmu_mmdc->mmdc_base; reg = mmdc_base + MMDC_MADPCR0; /* * hrtimer is required because mmdc does not provide an interrupt so * polling is necessary */ hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(), HRTIMER_MODE_REL_PINNED); local64_set(&hwc->prev_count, 0); writel(DBG_RST, reg); /* * Write the AXI id parameter to MADPCR1. */ val = event->attr.config1; reg = mmdc_base + MMDC_MADPCR1; writel(val, reg); reg = mmdc_base + MMDC_MADPCR0; val = DBG_EN; if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL) val |= PROFILE_SEL; writel(val, reg); } static int mmdc_pmu_event_add(struct perf_event *event, int flags) { struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int cfg = event->attr.config; if (flags & PERF_EF_START) mmdc_pmu_event_start(event, flags); if (pmu_mmdc->mmdc_events[cfg] != NULL) return -EAGAIN; pmu_mmdc->mmdc_events[cfg] = event; pmu_mmdc->active_events++; local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg)); return 0; } static void mmdc_pmu_event_stop(struct perf_event *event, int flags) { struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); void __iomem *mmdc_base, *reg; mmdc_base = pmu_mmdc->mmdc_base; reg = mmdc_base + MMDC_MADPCR0; writel(PRF_FRZ, reg); reg = mmdc_base + MMDC_MADPCR1; writel(MMDC_PRF_AXI_ID_CLEAR, reg); mmdc_pmu_event_update(event); } static void mmdc_pmu_event_del(struct perf_event *event, int flags) { struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); int cfg = event->attr.config; pmu_mmdc->mmdc_events[cfg] = NULL; pmu_mmdc->active_events--; if (pmu_mmdc->active_events == 0) hrtimer_cancel(&pmu_mmdc->hrtimer); mmdc_pmu_event_stop(event, PERF_EF_UPDATE); } static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc) { int i; for (i = 0; i < 
MMDC_NUM_COUNTERS; i++) { struct perf_event *event = pmu_mmdc->mmdc_events[i]; if (event) mmdc_pmu_event_update(event); } } static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer) { struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu, hrtimer); mmdc_pmu_overflow_handler(pmu_mmdc); hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period()); return HRTIMER_RESTART; } static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc, void __iomem *mmdc_base, struct device *dev) { *pmu_mmdc = (struct mmdc_pmu) { .pmu = (struct pmu) { .task_ctx_nr = perf_invalid_context, .attr_groups = attr_groups, .event_init = mmdc_pmu_event_init, .add = mmdc_pmu_event_add, .del = mmdc_pmu_event_del, .start = mmdc_pmu_event_start, .stop = mmdc_pmu_event_stop, .read = mmdc_pmu_event_update, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }, .mmdc_base = mmdc_base, .dev = dev, .active_events = 0, }; pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL); return pmu_mmdc->id; } static void imx_mmdc_remove(struct platform_device *pdev) { struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev); ida_simple_remove(&mmdc_ida, pmu_mmdc->id); cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); perf_pmu_unregister(&pmu_mmdc->pmu); iounmap(pmu_mmdc->mmdc_base); clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk); kfree(pmu_mmdc); } static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base, struct clk *mmdc_ipg_clk) { struct mmdc_pmu *pmu_mmdc; char *name; int ret; const struct of_device_id *of_id = of_match_device(imx_mmdc_dt_ids, &pdev->dev); pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL); if (!pmu_mmdc) { pr_err("failed to allocate PMU device!\n"); return -ENOMEM; } /* The first instance registers the hotplug state */ if (!cpuhp_mmdc_state) { ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/arm/mmdc:online", NULL, mmdc_pmu_offline_cpu); if (ret < 0) { pr_err("cpuhp_setup_state_multi failed\n"); goto pmu_free; } cpuhp_mmdc_state = ret; 
} ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); if (ret < 0) goto pmu_free; name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mmdc%d", ret); pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data; hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler; cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu); /* Register the pmu instance for cpu hotplug */ cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1); if (ret) goto pmu_register_err; platform_set_drvdata(pdev, pmu_mmdc); return 0; pmu_register_err: pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret); ida_simple_remove(&mmdc_ida, pmu_mmdc->id); cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); hrtimer_cancel(&pmu_mmdc->hrtimer); pmu_free: kfree(pmu_mmdc); return ret; } #else #define imx_mmdc_remove NULL #define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0 #endif static int imx_mmdc_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; void __iomem *mmdc_base, *reg; struct clk *mmdc_ipg_clk; u32 val; int err; /* the ipg clock is optional */ mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mmdc_ipg_clk)) mmdc_ipg_clk = NULL; err = clk_prepare_enable(mmdc_ipg_clk); if (err) { dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n"); return err; } mmdc_base = of_iomap(np, 0); WARN_ON(!mmdc_base); reg = mmdc_base + MMDC_MDMISC; /* Get ddr type */ val = readl_relaxed(reg); ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >> BP_MMDC_MDMISC_DDR_TYPE; reg = mmdc_base + MMDC_MAPSR; /* Enable automatic power saving */ val = readl_relaxed(reg); val &= ~(1 << BP_MMDC_MAPSR_PSD); writel_relaxed(val, reg); err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk); if (err) { iounmap(mmdc_base); clk_disable_unprepare(mmdc_ipg_clk); } return err; } int 
imx_mmdc_get_ddr_type(void) { return ddr_type; } static struct platform_driver imx_mmdc_driver = { .driver = { .name = "imx-mmdc", .of_match_table = imx_mmdc_dt_ids, }, .probe = imx_mmdc_probe, .remove_new = imx_mmdc_remove, }; static int __init imx_mmdc_init(void) { return platform_driver_register(&imx_mmdc_driver); } postcore_initcall(imx_mmdc_init);
linux-master
arch/arm/mach-imx/mmdc.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Freescale Semiconductor, Inc. */ #include <linux/context_tracking.h> #include <linux/cpuidle.h> #include <linux/module.h> #include <asm/cpuidle.h> #include <soc/imx/cpuidle.h> #include "common.h" #include "cpuidle.h" #include "hardware.h" static int num_idle_cpus = 0; static DEFINE_RAW_SPINLOCK(cpuidle_lock); static __cpuidle int imx6q_enter_wait(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { raw_spin_lock(&cpuidle_lock); if (++num_idle_cpus == num_online_cpus()) imx6_set_lpm(WAIT_UNCLOCKED); raw_spin_unlock(&cpuidle_lock); ct_cpuidle_enter(); cpu_do_idle(); ct_cpuidle_exit(); raw_spin_lock(&cpuidle_lock); if (num_idle_cpus-- == num_online_cpus()) imx6_set_lpm(WAIT_CLOCKED); raw_spin_unlock(&cpuidle_lock); return index; } static struct cpuidle_driver imx6q_cpuidle_driver = { .name = "imx6q_cpuidle", .owner = THIS_MODULE, .states = { /* WFI */ ARM_CPUIDLE_WFI_STATE, /* WAIT */ { .exit_latency = 50, .target_residency = 75, .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE, .enter = imx6q_enter_wait, .name = "WAIT", .desc = "Clock off", }, }, .state_count = 2, .safe_state_index = 0, }; /* * i.MX6 Q/DL has an erratum (ERR006687) that prevents the FEC from waking the * CPUs when they are in wait(unclocked) state. As the hardware workaround isn't * applicable to all boards, disable the deeper idle state when the workaround * isn't present and the FEC is in use. */ void imx6q_cpuidle_fec_irqs_used(void) { cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, true); } EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_used); void imx6q_cpuidle_fec_irqs_unused(void) { cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, false); } EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_unused); int __init imx6q_cpuidle_init(void) { /* Set INT_MEM_CLK_LPM bit to get a reliable WAIT mode support */ imx6_set_int_mem_clk_lpm(true); return cpuidle_register(&imx6q_cpuidle_driver, NULL); }
linux-master
arch/arm/mach-imx/cpuidle-imx6q.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012 Sascha Hauer, Pengutronix */ #include <linux/init.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "common.h" #include "hardware.h" #include "mx27.h" /* MX27 memory map definition */ static struct map_desc imx27_io_desc[] __initdata = { /* * this fixed mapping covers: * - AIPI1 * - AIPI2 * - AITC * - ROM Patch * - and some reserved space */ imx_map_entry(MX27, AIPI, MT_DEVICE), /* * this fixed mapping covers: * - CSI * - ATA */ imx_map_entry(MX27, SAHB1, MT_DEVICE), /* * this fixed mapping covers: * - EMI */ imx_map_entry(MX27, X_MEMC, MT_DEVICE), }; /* * Initialize the memory map. It is called during the * system startup to create static physical to virtual * memory map for the IO modules. */ static void __init mx27_map_io(void) { iotable_init(imx27_io_desc, ARRAY_SIZE(imx27_io_desc)); } static void __init imx27_init_early(void) { mxc_set_cpu_type(MXC_CPU_MX27); } static const char * const imx27_dt_board_compat[] __initconst = { "fsl,imx27", NULL }; DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)") .map_io = mx27_map_io, .init_early = imx27_init_early, .init_late = imx27_pm_init, .dt_compat = imx27_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx27.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Juergen Beisert, [email protected] */ /* * i.MX27 specific CPU detection code */ #include <linux/io.h> #include <linux/of_address.h> #include <linux/module.h> #include "hardware.h" static int mx27_cpu_rev = -1; static int mx27_cpu_partnumber; #define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */ #define SYSCTRL_OFFSET 0x800 /* Offset from CCM base address */ static int mx27_read_cpu_rev(void) { void __iomem *ccm_base; struct device_node *np; u32 val; np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm"); ccm_base = of_iomap(np, 0); of_node_put(np); BUG_ON(!ccm_base); /* * now we have access to the IO registers. As we need * the silicon revision very early we read it here to * avoid any further hooks */ val = imx_readl(ccm_base + SYSCTRL_OFFSET + SYS_CHIP_ID); mx27_cpu_partnumber = (int)((val >> 12) & 0xFFFF); switch (val >> 28) { case 0: return IMX_CHIP_REVISION_1_0; case 1: return IMX_CHIP_REVISION_2_0; case 2: return IMX_CHIP_REVISION_2_1; default: return IMX_CHIP_REVISION_UNKNOWN; } } /* * Returns: * the silicon revision of the cpu * -EINVAL - not a mx27 */ int mx27_revision(void) { if (mx27_cpu_rev == -1) mx27_cpu_rev = mx27_read_cpu_rev(); if (mx27_cpu_partnumber != 0x8821) return -EINVAL; return mx27_cpu_rev; } EXPORT_SYMBOL(mx27_revision);
linux-master
arch/arm/mach-imx/cpu-imx27.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Freescale Semiconductor, Inc. */ #include <linux/cpuidle.h> #include <linux/module.h> #include <asm/system_misc.h> #include "cpuidle.h" static __cpuidle int imx5_cpuidle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { arm_pm_idle(); return index; } static struct cpuidle_driver imx5_cpuidle_driver = { .name = "imx5_cpuidle", .owner = THIS_MODULE, .states[0] = { .enter = imx5_cpuidle_enter, .exit_latency = 2, .target_residency = 1, .name = "IMX5 SRPG", .desc = "CPU state retained,powered off", }, .state_count = 1, }; int __init imx5_cpuidle_init(void) { return cpuidle_register(&imx5_cpuidle_driver, NULL); }
linux-master
arch/arm/mach-imx/cpuidle-imx5.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 *
 * cpuidle driver for i.MX6SX: WFI, clock-gated WAIT, and a deep state
 * that additionally powers off the ARM core.
 */

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/cpuidle.h>
#include <asm/suspend.h>

#include "common.h"
#include "cpuidle.h"
#include "hardware.h"

/* cpu_suspend() finisher: flush caches, then stop the core with WFI */
static int imx6sx_idle_finish(unsigned long val)
{
	/*
	 * for Cortex-A7 which has an internal L2
	 * cache, need to flush it before powering
	 * down ARM platform, since flushing L1 cache
	 * here again has very small overhead, compared
	 * to adding conditional code for L2 cache type,
	 * just call flush_cache_all() is fine.
	 */
	flush_cache_all();
	cpu_do_idle();

	return 0;
}

/*
 * Shared enter callback for states 1 (WAIT) and 2 (WAIT + ARM power
 * off).  Index 2 additionally enables the RBC, arms the GPC for ARM
 * power gating, and goes through cpu_suspend() with the resume address
 * set to v7_cpu_resume.
 */
static __cpuidle int imx6sx_enter_wait(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	imx6_set_lpm(WAIT_UNCLOCKED);

	switch (index) {
	case 1:
		cpu_do_idle();
		break;
	case 2:
		imx6_enable_rbc(true);
		imx_gpc_set_arm_power_in_lpm(true);
		imx_set_cpu_jump(0, v7_cpu_resume);
		/* Need to notify there is a cpu pm operation. */
		cpu_pm_enter();
		cpu_cluster_pm_enter();

		ct_cpuidle_enter();
		cpu_suspend(0, imx6sx_idle_finish);
		ct_cpuidle_exit();

		cpu_cluster_pm_exit();
		cpu_pm_exit();
		imx_gpc_set_arm_power_in_lpm(false);
		imx6_enable_rbc(false);
		break;
	default:
		break;
	}

	/* restore normal clocking before returning to the governor */
	imx6_set_lpm(WAIT_CLOCKED);

	return index;
}

static struct cpuidle_driver imx6sx_cpuidle_driver = {
	.name = "imx6sx_cpuidle",
	.owner = THIS_MODULE,
	.states = {
		/* WFI */
		ARM_CPUIDLE_WFI_STATE,
		/* WAIT */
		{
			.exit_latency = 50,
			.target_residency = 75,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = imx6sx_enter_wait,
			.name = "WAIT",
			.desc = "Clock off",
		},
		/* WAIT + ARM power off  */
		{
			/*
			 * ARM gating 31us * 5 + RBC clear 65us
			 * and some margin for SW execution, here set it
			 * to 300us.
			 */
			.exit_latency = 300,
			.target_residency = 500,
			.flags = CPUIDLE_FLAG_TIMER_STOP |
				 CPUIDLE_FLAG_RCU_IDLE,
			.enter = imx6sx_enter_wait,
			.name = "LOW-POWER-IDLE",
			.desc = "ARM power off",
		},
	},
	.state_count = 3,
	.safe_state_index = 0,
};

int __init imx6sx_cpuidle_init(void)
{
	imx6_set_int_mem_clk_lpm(true);
	imx6_enable_rbc(false);
	imx_gpc_set_l2_mem_power_in_lpm(false);
	/*
	 * set ARM power up/down timing to the fastest,
	 * sw2iso and sw can be set to one 32K cycle = 31us
	 * except for power up sw2iso which need to be
	 * larger than LDO ramp up time.
	 */
	imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
	imx_gpc_set_arm_power_down_timing(1, 1);

	return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
}
linux-master
arch/arm/mach-imx/cpuidle-imx6sx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2013-2014 Freescale Semiconductor, Inc. */ #include <asm/mach/arch.h> #include "common.h" static const char * const ls1021a_dt_compat[] __initconst = { "fsl,ls1021a", NULL, }; DT_MACHINE_START(LS1021A, "Freescale LS1021A") .smp = smp_ops(ls1021a_smp_ops), .dt_compat = ls1021a_dt_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-ls1021a.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2016 Freescale Semiconductor, Inc. * Copyright 2017-2018 NXP * Anson Huang <[email protected]> */ #include <linux/cpuidle.h> #include <linux/module.h> #include <asm/cpuidle.h> #include "common.h" #include "cpuidle.h" static __cpuidle int imx7ulp_enter_wait(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { if (index == 1) imx7ulp_set_lpm(ULP_PM_WAIT); else imx7ulp_set_lpm(ULP_PM_STOP); cpu_do_idle(); imx7ulp_set_lpm(ULP_PM_RUN); return index; } static struct cpuidle_driver imx7ulp_cpuidle_driver = { .name = "imx7ulp_cpuidle", .owner = THIS_MODULE, .states = { /* WFI */ ARM_CPUIDLE_WFI_STATE, /* WAIT */ { .exit_latency = 50, .target_residency = 75, .enter = imx7ulp_enter_wait, .name = "WAIT", .desc = "PSTOP2", }, /* STOP */ { .exit_latency = 100, .target_residency = 150, .enter = imx7ulp_enter_wait, .name = "STOP", .desc = "PSTOP1", }, }, .state_count = 3, .safe_state_index = 0, }; int __init imx7ulp_cpuidle_init(void) { return cpuidle_register(&imx7ulp_cpuidle_driver, NULL); }
linux-master
arch/arm/mach-imx/cpuidle-imx7ulp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Exported ksyms for the SSI FIQ handler * * Copyright (C) 2009, Sascha Hauer <[email protected]> */ #include <linux/module.h> #include <linux/platform_data/asoc-imx-ssi.h> EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer); EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer); EXPORT_SYMBOL(imx_ssi_fiq_start); EXPORT_SYMBOL(imx_ssi_fiq_end); EXPORT_SYMBOL(imx_ssi_fiq_base);
linux-master
arch/arm/mach-imx/ssi-fiq-ksym.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2016 Freescale Semiconductor, Inc. * Copyright 2017-2018 NXP * Author: Dong Aisheng <[email protected]> */ #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include "common.h" #define SMC_PMCTRL 0x10 #define BP_PMCTRL_PSTOPO 16 #define PSTOPO_PSTOP3 0x3 #define PSTOPO_PSTOP2 0x2 #define PSTOPO_PSTOP1 0x1 #define BP_PMCTRL_RUNM 8 #define RUNM_RUN 0 #define BP_PMCTRL_STOPM 0 #define STOPM_STOP 0 #define BM_PMCTRL_PSTOPO (3 << BP_PMCTRL_PSTOPO) #define BM_PMCTRL_RUNM (3 << BP_PMCTRL_RUNM) #define BM_PMCTRL_STOPM (7 << BP_PMCTRL_STOPM) static void __iomem *smc1_base; int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode) { u32 val = readl_relaxed(smc1_base + SMC_PMCTRL); /* clear all */ val &= ~(BM_PMCTRL_RUNM | BM_PMCTRL_STOPM | BM_PMCTRL_PSTOPO); switch (mode) { case ULP_PM_RUN: /* system/bus clock enabled */ val |= PSTOPO_PSTOP3 << BP_PMCTRL_PSTOPO; break; case ULP_PM_WAIT: /* system clock disabled, bus clock enabled */ val |= PSTOPO_PSTOP2 << BP_PMCTRL_PSTOPO; break; case ULP_PM_STOP: /* system/bus clock disabled */ val |= PSTOPO_PSTOP1 << BP_PMCTRL_PSTOPO; break; default: return -EINVAL; } writel_relaxed(val, smc1_base + SMC_PMCTRL); return 0; } void __init imx7ulp_pm_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,imx7ulp-smc1"); smc1_base = of_iomap(np, 0); of_node_put(np); WARN_ON(!smc1_base); imx7ulp_set_lpm(ULP_PM_RUN); }
linux-master
arch/arm/mach-imx/pm-imx7ulp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Freescale Semiconductor, Inc. */ #include <linux/irqchip.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/regmap.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "common.h" static int bcm54220_phy_fixup(struct phy_device *dev) { /* enable RXC skew select RGMII copper mode */ phy_write(dev, 0x1e, 0x21); phy_write(dev, 0x1f, 0x7ea8); phy_write(dev, 0x1e, 0x2f); phy_write(dev, 0x1f, 0x71b7); return 0; } #define PHY_ID_BCM54220 0x600d8589 static void __init imx7d_enet_phy_init(void) { if (IS_BUILTIN(CONFIG_PHYLIB)) { phy_register_fixup_for_uid(PHY_ID_BCM54220, 0xffffffff, bcm54220_phy_fixup); } } static void __init imx7d_enet_clk_sel(void) { struct regmap *gpr; gpr = syscon_regmap_lookup_by_compatible("fsl,imx7d-iomuxc-gpr"); if (!IS_ERR(gpr)) { regmap_update_bits(gpr, IOMUXC_GPR1, IMX7D_GPR1_ENET_TX_CLK_SEL_MASK, 0); regmap_update_bits(gpr, IOMUXC_GPR1, IMX7D_GPR1_ENET_CLK_DIR_MASK, 0); } else { pr_err("failed to find fsl,imx7d-iomux-gpr regmap\n"); } } static inline void imx7d_enet_init(void) { imx7d_enet_phy_init(); imx7d_enet_clk_sel(); } static void __init imx7d_init_machine(void) { imx_anatop_init(); imx7d_enet_init(); } static void __init imx7d_init_late(void) { if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); } static void __init imx7d_init_irq(void) { imx_init_revision_from_anatop(); imx7_src_init(); irqchip_init(); } static const char *const imx7d_dt_compat[] __initconst = { "fsl,imx7d", "fsl,imx7s", NULL, }; DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)") .smp = smp_ops(imx7_smp_ops), .init_irq = imx7d_init_irq, .init_machine = imx7d_init_machine, .init_late = imx7d_init_late, .dt_compat = imx7d_dt_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx7d.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Juergen Beisert, [email protected]
 *
 * Driver for the AVIC (ARM Vectored Interrupt Controller) found on older
 * i.MX SoCs: 64 interrupt sources split over two 32-bit register banks
 * (the "L" registers cover irqs 0-31, the "H" registers irqs 32-63).
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>

#include "common.h"
#include "hardware.h"
#include "irq-common.h"

#define AVIC_INTCNTL	0x00	/* int control reg */
#define AVIC_NIMASK	0x04	/* int mask reg */
#define AVIC_INTENNUM	0x08	/* int enable number reg */
#define AVIC_INTDISNUM	0x0C	/* int disable number reg */
#define AVIC_INTENABLEH	0x10	/* int enable reg high */
#define AVIC_INTENABLEL	0x14	/* int enable reg low */
#define AVIC_INTTYPEH	0x18	/* int type reg high */
#define AVIC_INTTYPEL	0x1C	/* int type reg low */
#define AVIC_NIPRIORITY(x)	(0x20 + 4 * (7 - (x))) /* int priority */
#define AVIC_NIVECSR	0x40	/* norm int vector/status */
#define AVIC_FIVECSR	0x44	/* fast int vector/status */
#define AVIC_INTSRCH	0x48	/* int source reg high */
#define AVIC_INTSRCL	0x4C	/* int source reg low */
#define AVIC_INTFRCH	0x50	/* int force reg high */
#define AVIC_INTFRCL	0x54	/* int force reg low */
#define AVIC_NIPNDH	0x58	/* norm int pending high */
#define AVIC_NIPNDL	0x5C	/* norm int pending low */
#define AVIC_FIPNDH	0x60	/* fast int pending high */
#define AVIC_FIPNDL	0x64	/* fast int pending low */

#define AVIC_NUM_IRQS 64

/* low power interrupt mask registers (live in the i.MX25 CCM block) */
#define MX25_CCM_LPIMR0	0x68
#define MX25_CCM_LPIMR1	0x6C

static void __iomem *avic_base;
static void __iomem *mx25_ccm_base;	/* NULL unless an imx25-ccm node exists */
static struct irq_domain *domain;

#ifdef CONFIG_FIQ
/*
 * Route interrupt @hwirq to FIQ (type != 0) or normal IRQ (type == 0) by
 * flipping its bit in the INTTYPE bank that covers it.
 */
static int avic_set_irq_fiq(unsigned int hwirq, unsigned int type)
{
	unsigned int irqt;

	if (hwirq >= AVIC_NUM_IRQS)
		return -EINVAL;

	if (hwirq < AVIC_NUM_IRQS / 2) {
		irqt = imx_readl(avic_base + AVIC_INTTYPEL) & ~(1 << hwirq);
		imx_writel(irqt | (!!type << hwirq),
			   avic_base + AVIC_INTTYPEL);
	} else {
		/* rebase into the high bank */
		hwirq -= AVIC_NUM_IRQS / 2;
		irqt = imx_readl(avic_base + AVIC_INTTYPEH) & ~(1 << hwirq);
		imx_writel(irqt | (!!type << hwirq),
			   avic_base + AVIC_INTTYPEH);
	}

	return 0;
}
#endif /* CONFIG_FIQ */

static struct mxc_extra_irq avic_extra_irq = {
#ifdef CONFIG_FIQ
	.set_irq_fiq = avic_set_irq_fiq,
#endif
};

#ifdef CONFIG_PM
/* Saved INTENABLEL/INTENABLEH contents across a suspend cycle */
static u32 avic_saved_mask_reg[2];

/*
 * Suspend hook: save the bank's current enable mask and restrict enabled
 * interrupts to the configured wake sources while suspended.
 */
static void avic_irq_suspend(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	int idx = d->hwirq >> 5;	/* bank index: 0 = irqs 0-31, 1 = 32-63 */

	avic_saved_mask_reg[idx] = imx_readl(avic_base + ct->regs.mask);
	imx_writel(gc->wake_active, avic_base + ct->regs.mask);

	if (mx25_ccm_base) {
		u8 offs = d->hwirq < AVIC_NUM_IRQS / 2 ?
			MX25_CCM_LPIMR0 : MX25_CCM_LPIMR1;
		/*
		 * The interrupts which are still enabled will be used as wakeup
		 * sources. Allow those interrupts in low-power mode.
		 * The LPIMR registers use 0 to allow an interrupt, the AVIC
		 * registers use 1.
		 */
		imx_writel(~gc->wake_active, mx25_ccm_base + offs);
	}
}

/*
 * Resume hook: restore the enable mask saved by avic_irq_suspend() and
 * re-mask everything in the low-power mask registers.
 */
static void avic_irq_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	int idx = d->hwirq >> 5;

	imx_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask);

	if (mx25_ccm_base) {
		u8 offs = d->hwirq < AVIC_NUM_IRQS / 2 ?
			MX25_CCM_LPIMR0 : MX25_CCM_LPIMR1;

		imx_writel(0xffffffff, mx25_ccm_base + offs);
	}
}

#else
#define avic_irq_suspend NULL
#define avic_irq_resume NULL
#endif

/* Set up one generic irq chip covering a 32-interrupt bank (@idx 0 or 1). */
static __init void avic_init_gc(int idx, unsigned int irq_start)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("mxc-avic", 1, irq_start, avic_base,
				    handle_level_irq);
	gc->private = &avic_extra_irq;
	gc->wake_enabled = IRQ_MSK(32);

	ct = gc->chip_types;
	ct->chip.irq_mask = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->chip.irq_ack = irq_gc_mask_clr_bit;
	ct->chip.irq_set_wake = irq_gc_set_wake;
	ct->chip.irq_suspend = avic_irq_suspend;
	ct->chip.irq_resume = avic_irq_resume;
	/* bank 0 masks via INTENABLEL, bank 1 via INTENABLEH */
	ct->regs.mask = !idx ? AVIC_INTENABLEL : AVIC_INTENABLEH;
	ct->regs.ack = ct->regs.mask;

	irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
}

/*
 * Top-level IRQ entry: keep dispatching the highest-priority pending
 * vector from NIVECSR until it reads "none pending" (0xffff).
 */
static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
{
	u32 nivector;

	do {
		/* the upper 16 bits of NIVECSR hold the pending vector */
		nivector = imx_readl(avic_base + AVIC_NIVECSR) >> 16;
		if (nivector == 0xffff)
			break;

		generic_handle_domain_irq(domain, nivector);
	} while (1);
}

/*
 * This function initializes the AVIC hardware and disables all the
 * interrupts. It registers the interrupt enable and disable functions
 * to the kernel for each interrupt source.
 */
static void __init mxc_init_irq(void __iomem *irqbase)
{
	struct device_node *np;
	int irq_base;
	int i;

	avic_base = irqbase;

	/* only present on i.MX25; mx25_ccm_base stays NULL elsewhere */
	np = of_find_compatible_node(NULL, NULL, "fsl,imx25-ccm");
	mx25_ccm_base = of_iomap(np, 0);

	if (mx25_ccm_base) {
		/*
		 * By default, we mask all interrupts. We set the actual mask
		 * before we go into low-power mode.
		 */
		imx_writel(0xffffffff, mx25_ccm_base + MX25_CCM_LPIMR0);
		imx_writel(0xffffffff, mx25_ccm_base + MX25_CCM_LPIMR1);
	}

	/* put the AVIC into the reset value with
	 * all interrupts disabled
	 */
	imx_writel(0, avic_base + AVIC_INTCNTL);
	imx_writel(0x1f, avic_base + AVIC_NIMASK);

	/* disable all interrupts */
	imx_writel(0, avic_base + AVIC_INTENABLEH);
	imx_writel(0, avic_base + AVIC_INTENABLEL);

	/* all IRQ no FIQ */
	imx_writel(0, avic_base + AVIC_INTTYPEH);
	imx_writel(0, avic_base + AVIC_INTTYPEL);

	irq_base = irq_alloc_descs(-1, 0, AVIC_NUM_IRQS, numa_node_id());
	WARN_ON(irq_base < 0);

	np = of_find_compatible_node(NULL, NULL, "fsl,avic");
	domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0,
				       &irq_domain_simple_ops, NULL);
	WARN_ON(!domain);

	/* one generic chip per 32-interrupt bank */
	for (i = 0; i < AVIC_NUM_IRQS / 32; i++, irq_base += 32)
		avic_init_gc(i, irq_base);

	/* Set default priority value (0) for all IRQ's */
	for (i = 0; i < 8; i++)
		imx_writel(0, avic_base + AVIC_NIPRIORITY(i));

	set_handle_irq(avic_handle_irq);

#ifdef CONFIG_FIQ
	/* Initialize FIQ */
	init_FIQ(FIQ_START);
#endif

	printk(KERN_INFO "MXC IRQ initialized\n");
}

/*
 * irqchip entry point: map the AVIC registers and hand them to
 * mxc_init_irq().  The local avic_base intentionally shadows the
 * file-scope one, which mxc_init_irq() assigns from its argument.
 */
static int __init imx_avic_init(struct device_node *node,
			       struct device_node *parent)
{
	void __iomem *avic_base;

	avic_base = of_iomap(node, 0);
	BUG_ON(!avic_base);
	mxc_init_irq(avic_base);
	return 0;
}

IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init);
linux-master
arch/arm/mach-imx/avic.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2016 Freescale Semiconductor, Inc. * Copyright 2017-2018 NXP * Author: Dong Aisheng <[email protected]> */ #include <linux/irqchip.h> #include <linux/mfd/syscon.h> #include <linux/of_platform.h> #include <linux/regmap.h> #include <asm/mach/arch.h> #include "common.h" #include "cpuidle.h" #include "hardware.h" #define SIM_JTAG_ID_REG 0x8c static void __init imx7ulp_set_revision(void) { struct regmap *sim; u32 revision; sim = syscon_regmap_lookup_by_compatible("fsl,imx7ulp-sim"); if (IS_ERR(sim)) { pr_warn("failed to find fsl,imx7ulp-sim regmap!\n"); return; } if (regmap_read(sim, SIM_JTAG_ID_REG, &revision)) { pr_warn("failed to read sim regmap!\n"); return; } /* * bit[31:28] of JTAG_ID register defines revision as below from B0: * 0001 B0 * 0010 B1 * 0011 B2 */ switch (revision >> 28) { case 1: imx_set_soc_revision(IMX_CHIP_REVISION_2_0); break; case 2: imx_set_soc_revision(IMX_CHIP_REVISION_2_1); break; case 3: imx_set_soc_revision(IMX_CHIP_REVISION_2_2); break; default: imx_set_soc_revision(IMX_CHIP_REVISION_1_0); break; } } static void __init imx7ulp_init_machine(void) { imx7ulp_pm_init(); mxc_set_cpu_type(MXC_CPU_IMX7ULP); imx7ulp_set_revision(); of_platform_default_populate(NULL, NULL, NULL); } static const char *const imx7ulp_dt_compat[] __initconst = { "fsl,imx7ulp", NULL, }; static void __init imx7ulp_init_late(void) { if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); imx7ulp_cpuidle_init(); } DT_MACHINE_START(IMX7ulp, "Freescale i.MX7ULP (Device Tree)") .init_machine = imx7ulp_init_machine, .dt_compat = imx7ulp_dt_compat, .init_late = imx7ulp_init_late, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx7ulp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * SMP bring-up for SCU-based i.MX SoCs, the i.MX7D, and the LS1021A.
 */

#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>

#include "common.h"
#include "hardware.h"

/* Boot CPU's diagnostic register value, replicated onto secondary cores */
u32 g_diag_reg;
static void __iomem *scu_base;

static struct map_desc scu_io_desc __initdata = {
	/* .virtual and .pfn are run-time assigned */
	.length		= SZ_4K,
	.type		= MT_DEVICE,
};

/*
 * Map the Snoop Control Unit registers.  The physical base is read from
 * CP15 c15 (Configuration Base Address Register).
 */
void __init imx_scu_map_io(void)
{
	unsigned long base;

	/* Get SCU base */
	asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (base));

	scu_io_desc.virtual = IMX_IO_P2V(base);
	scu_io_desc.pfn = __phys_to_pfn(base);
	iotable_init(&scu_io_desc, 1);

	scu_base = IMX_IO_ADDRESS(base);
}

/* Point a secondary core at its entry code, then release it. */
static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	imx_set_cpu_jump(cpu, v7_secondary_startup);
	imx_enable_cpu(cpu, true);
	return 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init imx_smp_init_cpus(void)
{
	int i, ncores;

	/* core count comes straight from the SCU on these SoCs */
	ncores = scu_get_core_count(scu_base);

	for (i = ncores; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}

void imx_smp_prepare(void)
{
	scu_enable(scu_base);
}

static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
{
	imx_smp_prepare();

	/*
	 * The diagnostic register holds the errata bits. Mostly bootloader
	 * does not bring up secondary cores, so that when errata bits are set
	 * in bootloader, they are set only for boot cpu. But on a SMP
	 * configuration, it should be equally done on every single core.
	 * Read the register from boot cpu here, and will replicate it into
	 * secondary cores when booting them.
	 */
	asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");
	/* push the value out so secondaries see it before caches are on */
	sync_cache_w(&g_diag_reg);
}

const struct smp_operations imx_smp_ops __initconst = {
	.smp_init_cpus		= imx_smp_init_cpus,
	.smp_prepare_cpus	= imx_smp_prepare_cpus,
	.smp_boot_secondary	= imx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= imx_cpu_die,
	.cpu_kill		= imx_cpu_kill,
#endif
};

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init imx7_smp_init_cpus(void)
{
	struct device_node *np;
	int i, ncores = 0;

	/* The iMX7D SCU does not report core count, get it from DT */
	for_each_of_cpu_node(np)
		ncores++;

	for (i = ncores; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}

const struct smp_operations imx7_smp_ops __initconst = {
	.smp_init_cpus		= imx7_smp_init_cpus,
	.smp_boot_secondary	= imx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= imx_cpu_die,
	.cpu_kill		= imx_cpu_kill,
#endif
};

#define DCFG_CCSR_SCRATCHRW1	0x200

/* LS1021A secondaries wait for a wakeup IPI rather than a reset release. */
static int ls1021a_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	return 0;
}

/*
 * Publish the secondary entry point in the DCFG scratch register
 * (big-endian, matching cpu_to_be32 below) before any core is woken.
 */
static void __init ls1021a_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	void __iomem *dcfg_base;
	unsigned long paddr;

	np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-dcfg");
	dcfg_base = of_iomap(np, 0);
	of_node_put(np);
	BUG_ON(!dcfg_base);

	paddr = __pa_symbol(secondary_startup);
	writel_relaxed(cpu_to_be32(paddr), dcfg_base + DCFG_CCSR_SCRATCHRW1);

	iounmap(dcfg_base);
}

const struct smp_operations ls1021a_smp_ops __initconst = {
	.smp_prepare_cpus	= ls1021a_smp_prepare_cpus,
	.smp_boot_secondary	= ls1021a_boot_secondary,
};
linux-master
arch/arm/mach-imx/platsmp.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. */ #include <linux/errno.h> #include <linux/jiffies.h> #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/proc-fns.h> #include "common.h" #include "hardware.h" /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void imx_cpu_die(unsigned int cpu) { v7_exit_coherency_flush(louis); /* * We use the cpu jumping argument register to sync with * imx_cpu_kill() which is running on cpu0 and waiting for * the register being cleared to kill the cpu. */ imx_set_cpu_arg(cpu, ~0); while (1) cpu_do_idle(); } int imx_cpu_kill(unsigned int cpu) { unsigned long timeout = jiffies + msecs_to_jiffies(50); while (imx_get_cpu_arg(cpu) == 0) if (time_after(jiffies, timeout)) return 0; imx_enable_cpu(cpu, false); imx_set_cpu_arg(cpu, 0); if (cpu_is_imx7d()) imx_gpcv2_set_core1_pdn_pup_by_software(true); return 1; }
linux-master
arch/arm/mach-imx/hotplug.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2011 Linaro Ltd. */ #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/mach/arch.h> #include "common.h" #include "hardware.h" static void __init imx51_init_early(void) { mxc_set_cpu_type(MXC_CPU_MX51); } /* * The MIPI HSC unit has been removed from the i.MX51 Reference Manual by * the Freescale marketing division. However this did not remove the * hardware from the chip which still needs to be configured for proper * IPU support. */ #define MX51_MIPI_HSC_BASE 0x83fdc000 static void __init imx51_ipu_mipi_setup(void) { void __iomem *hsc_addr; hsc_addr = ioremap(MX51_MIPI_HSC_BASE, SZ_16K); WARN_ON(!hsc_addr); /* setup MIPI module to legacy mode */ imx_writel(0xf00, hsc_addr); /* CSI mode: reserved; DI control mode: legacy (from Freescale BSP) */ imx_writel(imx_readl(hsc_addr + 0x800) | 0x30ff, hsc_addr + 0x800); iounmap(hsc_addr); } static void __init imx51_m4if_setup(void) { void __iomem *m4if_base; struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,imx51-m4if"); if (!np) return; m4if_base = of_iomap(np, 0); of_node_put(np); if (!m4if_base) { pr_err("Unable to map M4IF registers\n"); return; } /* * Configure VPU and IPU with higher priorities * in order to avoid artifacts during video playback */ writel_relaxed(0x00000203, m4if_base + 0x40); writel_relaxed(0x00000000, m4if_base + 0x44); writel_relaxed(0x00120125, m4if_base + 0x9c); writel_relaxed(0x001901A3, m4if_base + 0x48); iounmap(m4if_base); } static void __init imx51_dt_init(void) { imx51_ipu_mipi_setup(); imx_src_init(); imx51_m4if_setup(); imx5_pmu_init(); imx_aips_allow_unprivileged_access("fsl,imx51-aipstz"); } static void __init imx51_init_late(void) { mx51_neon_fixup(); imx51_pm_init(); } static const char * const imx51_dt_board_compat[] __initconst = { "fsl,imx51", NULL }; DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 
(Device Tree Support)") .init_early = imx51_init_early, .init_machine = imx51_dt_init, .init_late = imx51_init_late, .dt_compat = imx51_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-imx/mach-imx51.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * Driver for the i.MX System Reset Controller (SRC): module software
 * resets, secondary-core enable/jump registers, and (on i.MX7D) the
 * GPC-based core power sequencing.
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
#include "common.h"
#include "hardware.h"

#define SRC_SCR				0x000
#define SRC_GPR1_V1			0x020
#define SRC_GPR1_V2			0x074
#define SRC_GPR1(gpr_v2)		((gpr_v2) ? SRC_GPR1_V2 : SRC_GPR1_V1)
#define BP_SRC_SCR_WARM_RESET_ENABLE	0
#define BP_SRC_SCR_SW_GPU_RST		1
#define BP_SRC_SCR_SW_VPU_RST		2
#define BP_SRC_SCR_SW_IPU1_RST		3
#define BP_SRC_SCR_SW_OPEN_VG_RST	4
#define BP_SRC_SCR_SW_IPU2_RST		12
#define BP_SRC_SCR_CORE1_RST		14
#define BP_SRC_SCR_CORE1_ENABLE		22
/* below is for i.MX7D */
#define SRC_A7RCR1			0x008
#define BP_SRC_A7RCR1_A7_CORE1_ENABLE	1
#define GPC_CPU_PGC_SW_PUP_REQ		0xf0
#define GPC_CPU_PGC_SW_PDN_REQ		0xfc
#define GPC_PGC_C1			0x840
#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7	0x2

static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);
static bool gpr_v2;	/* true on i.MX7D: different GPR layout + GPC control */
static void __iomem *gpc_base;	/* only mapped by imx7_src_init() */

/* SRC_SCR bit positions for each software-resettable module */
static const int sw_reset_bits[5] = {
	BP_SRC_SCR_SW_GPU_RST,
	BP_SRC_SCR_SW_VPU_RST,
	BP_SRC_SCR_SW_IPU1_RST,
	BP_SRC_SCR_SW_OPEN_VG_RST,
	BP_SRC_SCR_SW_IPU2_RST
};

/*
 * reset_control_ops callback: set a module's self-clearing reset bit in
 * SRC_SCR, then wait (up to 1s) for the hardware to clear it again.
 */
static int imx_src_reset_module(struct reset_controller_dev *rcdev,
		unsigned long sw_reset_idx)
{
	unsigned long timeout;
	unsigned long flags;
	int bit;
	u32 val;

	if (sw_reset_idx >= ARRAY_SIZE(sw_reset_bits))
		return -EINVAL;

	bit = 1 << sw_reset_bits[sw_reset_idx];

	/* SRC_SCR is shared with imx_enable_cpu() and imx_src_init() */
	spin_lock_irqsave(&scr_lock, flags);
	val = readl_relaxed(src_base + SRC_SCR);
	val |= bit;
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock_irqrestore(&scr_lock, flags);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (readl(src_base + SRC_SCR) & bit) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

static const struct reset_control_ops imx_src_ops = {
	.reset = imx_src_reset_module,
};

/* Write the PGC enable flag for the power domain at @offset in the GPC. */
static void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
	writel_relaxed(enable, gpc_base + offset);
}

/*
 * The motivation for bringing up the second i.MX7D core inside the kernel
 * is that legacy vendor bootloaders usually do not implement PSCI support.
 * This is a significant blocker for systems in the field that are running old
 * bootloader versions to upgrade to a modern mainline kernel version, as only
 * one CPU of the i.MX7D would be brought up.
 * Bring up the second i.MX7D core inside the kernel to make the migration
 * path to mainline kernel easier for the existing iMX7D users.
 */
void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn)
{
	u32 reg = pdn ? GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ;
	u32 val, pup;
	int ret;

	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C1);
	val = readl_relaxed(gpc_base + reg);
	val |= BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7;
	writel_relaxed(val, gpc_base + reg);

	/* the request bit self-clears once the power transition completes */
	ret = readl_relaxed_poll_timeout_atomic(gpc_base + reg, pup,
				!(pup & BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7),
				5, 1000000);
	if (ret < 0) {
		/* NOTE: message says "power up" even for power-down requests */
		pr_err("i.MX7D: CORE1_A7 power up timeout\n");
		val &= ~BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7;
		writel_relaxed(val, gpc_base + reg);
	}

	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C1);
}

/*
 * Enable or disable a secondary core.  On i.MX7D (gpr_v2) the core is
 * powered up through the GPC first and then enabled via SRC_A7RCR1; on
 * older SoCs the enable bit lives in SRC_SCR, with the core's reset bit
 * always written as well.
 */
void imx_enable_cpu(int cpu, bool enable)
{
	u32 mask, val;

	cpu = cpu_logical_map(cpu);
	spin_lock(&scr_lock);
	if (gpr_v2) {
		if (enable)
			imx_gpcv2_set_core1_pdn_pup_by_software(false);

		mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
		val = readl_relaxed(src_base + SRC_A7RCR1);
		val = enable ? val | mask : val & ~mask;
		writel_relaxed(val, src_base + SRC_A7RCR1);
	} else {
		mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
		val = readl_relaxed(src_base + SRC_SCR);
		val = enable ? val | mask : val & ~mask;
		val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
		writel_relaxed(val, src_base + SRC_SCR);
	}
	spin_unlock(&scr_lock);
}

/* Set the physical address a secondary core jumps to when released. */
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(__pa_symbol(jump_addr),
		       src_base + SRC_GPR1(gpr_v2) + cpu * 8);
}

/* Read the per-cpu argument word (the GPR following the jump address). */
u32 imx_get_cpu_arg(int cpu)
{
	cpu = cpu_logical_map(cpu);
	return readl_relaxed(src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
}

/* Write the per-cpu argument word (the GPR following the jump address). */
void imx_set_cpu_arg(int cpu, u32 arg)
{
	cpu = cpu_logical_map(cpu);
	writel_relaxed(arg, src_base + SRC_GPR1(gpr_v2) + cpu * 8 + 4);
}

void __init imx_src_init(void)
{
	struct device_node *np;
	u32 val;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx51-src");
	if (!np)
		return;
	src_base = of_iomap(np, 0);
	WARN_ON(!src_base);

	/*
	 * force warm reset sources to generate cold reset
	 * for a more reliable restart
	 */
	spin_lock(&scr_lock);
	val = readl_relaxed(src_base + SRC_SCR);
	val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
	writel_relaxed(val, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}

/* i.MX7D variant: v2 GPR layout plus the GPC block for core power control. */
void __init imx7_src_init(void)
{
	struct device_node *np;

	gpr_v2 = true;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-src");
	if (!np)
		return;

	src_base = of_iomap(np, 0);
	if (!src_base)
		return;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-gpc");
	if (!np)
		return;

	gpc_base = of_iomap(np, 0);
	if (!gpc_base)
		return;
}

static const struct of_device_id imx_src_dt_ids[] = {
	{ .compatible = "fsl,imx51-src" },
	{ /* sentinel */ }
};

/* Register the SRC as a reset controller for the module reset bits above. */
static int imx_src_probe(struct platform_device *pdev)
{
	struct reset_controller_dev *rcdev;

	rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
	if (!rcdev)
		return -ENOMEM;

	rcdev->ops = &imx_src_ops;
	rcdev->dev = &pdev->dev;
	rcdev->of_node = pdev->dev.of_node;
	rcdev->nr_resets = ARRAY_SIZE(sw_reset_bits);

	return devm_reset_controller_register(&pdev->dev, rcdev);
}

static struct platform_driver imx_src_driver = {
	.driver = {
		.name = "imx-src",
		.of_match_table = imx_src_dt_ids,
	},
	.probe = imx_src_probe,
};
builtin_platform_driver(imx_src_driver);
linux-master
arch/arm/mach-imx/src.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * mach-davinci/sram.c - DaVinci simple SRAM allocator * * Copyright (C) 2009 David Brownell */ #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/genalloc.h> #include "common.h" #include "sram.h" static struct gen_pool *sram_pool; struct gen_pool *sram_get_gen_pool(void) { return sram_pool; } void *sram_alloc(size_t len, dma_addr_t *dma) { dma_addr_t dma_base = davinci_soc_info.sram_dma; if (dma) *dma = 0; if (!sram_pool || (dma && !dma_base)) return NULL; return gen_pool_dma_alloc(sram_pool, len, dma); } EXPORT_SYMBOL(sram_alloc); void sram_free(void *addr, size_t len) { gen_pool_free(sram_pool, (unsigned long) addr, len); } EXPORT_SYMBOL(sram_free); /* * REVISIT This supports CPU and DMA access to/from SRAM, but it * doesn't (yet?) support some other notable uses of SRAM: as TCM * for data and/or instructions; and holding code needed to enter * and exit suspend states (while DRAM can't be used). */ static int __init sram_init(void) { phys_addr_t phys = davinci_soc_info.sram_dma; unsigned len = davinci_soc_info.sram_len; int status = 0; void __iomem *addr; if (len) { len = min_t(unsigned, len, SRAM_SIZE); sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1); if (!sram_pool) status = -ENOMEM; } if (sram_pool) { addr = ioremap(phys, len); if (!addr) return -ENOMEM; status = gen_pool_add_virt(sram_pool, (unsigned long) addr, phys, len, -1); if (status < 0) iounmap(addr); } WARN_ON(status < 0); return status; } core_initcall(sram_init);
linux-master
arch/arm/mach-davinci/sram.c
// SPDX-License-Identifier: GPL-2.0-only /* * Code commons to all DaVinci SoCs. * * Author: Mark A. Greer <[email protected]> * * 2009 (c) MontaVista Software, Inc. */ #include <linux/module.h> #include <linux/io.h> #include <linux/etherdevice.h> #include <linux/davinci_emac.h> #include <linux/dma-mapping.h> #include <linux/platform_data/davinci-cpufreq.h> #include <asm/tlb.h> #include <asm/mach/map.h> #include "common.h" #include "cputype.h" struct davinci_soc_info davinci_soc_info; EXPORT_SYMBOL(davinci_soc_info); static int __init davinci_init_id(struct davinci_soc_info *soc_info) { int i; struct davinci_id *dip; u8 variant; u16 part_no; void __iomem *base; base = ioremap(soc_info->jtag_id_reg, SZ_4K); if (!base) { pr_err("Unable to map JTAG ID register\n"); return -ENOMEM; } soc_info->jtag_id = __raw_readl(base); iounmap(base); variant = (soc_info->jtag_id & 0xf0000000) >> 28; part_no = (soc_info->jtag_id & 0x0ffff000) >> 12; for (i = 0, dip = soc_info->ids; i < soc_info->ids_num; i++, dip++) /* Don't care about the manufacturer right now */ if ((dip->part_no == part_no) && (dip->variant == variant)) { soc_info->cpu_id = dip->cpu_id; pr_info("DaVinci %s variant 0x%x\n", dip->name, dip->variant); return 0; } pr_err("Unknown DaVinci JTAG ID 0x%x\n", soc_info->jtag_id); return -EINVAL; } void __init davinci_common_init(const struct davinci_soc_info *soc_info) { int ret; if (!soc_info) { ret = -EINVAL; goto err; } memcpy(&davinci_soc_info, soc_info, sizeof(struct davinci_soc_info)); if (davinci_soc_info.io_desc && (davinci_soc_info.io_desc_num > 0)) iotable_init(davinci_soc_info.io_desc, davinci_soc_info.io_desc_num); /* * Normally devicemaps_init() would flush caches and tlb after * mdesc->map_io(), but we must also do it here because of the CPU * revision check below. */ local_flush_tlb_all(); flush_cache_all(); /* * We want to check CPU revision early for cpu_is_xxxx() macros. * IO space mapping must be initialized before we can do that. 
*/ ret = davinci_init_id(&davinci_soc_info); if (ret < 0) goto err; return; err: panic("davinci_common_init: SoC Initialization failed\n"); } void __init davinci_init_late(void) { davinci_cpufreq_init(); }
linux-master
arch/arm/mach-davinci/common.c
// SPDX-License-Identifier: GPL-2.0-only /* * TI DA830/OMAP L137 chip specific setup * * Author: Mark A. Greer <[email protected]> * * 2009 (c) MontaVista Software, Inc. */ #include <linux/clk-provider.h> #include <linux/clk/davinci.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irqchip/irq-davinci-cp-intc.h> #include <clocksource/timer-davinci.h> #include <asm/mach/map.h> #include "common.h" #include "cputype.h" #include "da8xx.h" #include "irqs.h" #include "mux.h" /* Offsets of the 8 compare registers on the da830 */ #define DA830_CMP12_0 0x60 #define DA830_CMP12_1 0x64 #define DA830_CMP12_2 0x68 #define DA830_CMP12_3 0x6c #define DA830_CMP12_4 0x70 #define DA830_CMP12_5 0x74 #define DA830_CMP12_6 0x78 #define DA830_CMP12_7 0x7c #define DA830_REF_FREQ 24000000 /* * Device specific mux setup * * soc description mux mode mode mux dbg * reg offset mask mode */ static const struct mux_config da830_pins[] = { #ifdef CONFIG_DAVINCI_MUX MUX_CFG(DA830, GPIO7_14, 0, 0, 0xf, 1, false) MUX_CFG(DA830, RTCK, 0, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO7_15, 0, 4, 0xf, 1, false) MUX_CFG(DA830, EMU_0, 0, 4, 0xf, 8, false) MUX_CFG(DA830, EMB_SDCKE, 0, 8, 0xf, 1, false) MUX_CFG(DA830, EMB_CLK_GLUE, 0, 12, 0xf, 1, false) MUX_CFG(DA830, EMB_CLK, 0, 12, 0xf, 2, false) MUX_CFG(DA830, NEMB_CS_0, 0, 16, 0xf, 1, false) MUX_CFG(DA830, NEMB_CAS, 0, 20, 0xf, 1, false) MUX_CFG(DA830, NEMB_RAS, 0, 24, 0xf, 1, false) MUX_CFG(DA830, NEMB_WE, 0, 28, 0xf, 1, false) MUX_CFG(DA830, EMB_BA_1, 1, 0, 0xf, 1, false) MUX_CFG(DA830, EMB_BA_0, 1, 4, 0xf, 1, false) MUX_CFG(DA830, EMB_A_0, 1, 8, 0xf, 1, false) MUX_CFG(DA830, EMB_A_1, 1, 12, 0xf, 1, false) MUX_CFG(DA830, EMB_A_2, 1, 16, 0xf, 1, false) MUX_CFG(DA830, EMB_A_3, 1, 20, 0xf, 1, false) MUX_CFG(DA830, EMB_A_4, 1, 24, 0xf, 1, false) MUX_CFG(DA830, EMB_A_5, 1, 28, 0xf, 1, false) MUX_CFG(DA830, GPIO7_0, 1, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO7_1, 1, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO7_2, 1, 8, 0xf, 8, false) 
MUX_CFG(DA830, GPIO7_3, 1, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO7_4, 1, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO7_5, 1, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO7_6, 1, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO7_7, 1, 28, 0xf, 8, false) MUX_CFG(DA830, EMB_A_6, 2, 0, 0xf, 1, false) MUX_CFG(DA830, EMB_A_7, 2, 4, 0xf, 1, false) MUX_CFG(DA830, EMB_A_8, 2, 8, 0xf, 1, false) MUX_CFG(DA830, EMB_A_9, 2, 12, 0xf, 1, false) MUX_CFG(DA830, EMB_A_10, 2, 16, 0xf, 1, false) MUX_CFG(DA830, EMB_A_11, 2, 20, 0xf, 1, false) MUX_CFG(DA830, EMB_A_12, 2, 24, 0xf, 1, false) MUX_CFG(DA830, EMB_D_31, 2, 28, 0xf, 1, false) MUX_CFG(DA830, GPIO7_8, 2, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO7_9, 2, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO7_10, 2, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO7_11, 2, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO7_12, 2, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO7_13, 2, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO3_13, 2, 24, 0xf, 8, false) MUX_CFG(DA830, EMB_D_30, 3, 0, 0xf, 1, false) MUX_CFG(DA830, EMB_D_29, 3, 4, 0xf, 1, false) MUX_CFG(DA830, EMB_D_28, 3, 8, 0xf, 1, false) MUX_CFG(DA830, EMB_D_27, 3, 12, 0xf, 1, false) MUX_CFG(DA830, EMB_D_26, 3, 16, 0xf, 1, false) MUX_CFG(DA830, EMB_D_25, 3, 20, 0xf, 1, false) MUX_CFG(DA830, EMB_D_24, 3, 24, 0xf, 1, false) MUX_CFG(DA830, EMB_D_23, 3, 28, 0xf, 1, false) MUX_CFG(DA830, EMB_D_22, 4, 0, 0xf, 1, false) MUX_CFG(DA830, EMB_D_21, 4, 4, 0xf, 1, false) MUX_CFG(DA830, EMB_D_20, 4, 8, 0xf, 1, false) MUX_CFG(DA830, EMB_D_19, 4, 12, 0xf, 1, false) MUX_CFG(DA830, EMB_D_18, 4, 16, 0xf, 1, false) MUX_CFG(DA830, EMB_D_17, 4, 20, 0xf, 1, false) MUX_CFG(DA830, EMB_D_16, 4, 24, 0xf, 1, false) MUX_CFG(DA830, NEMB_WE_DQM_3, 4, 28, 0xf, 1, false) MUX_CFG(DA830, NEMB_WE_DQM_2, 5, 0, 0xf, 1, false) MUX_CFG(DA830, EMB_D_0, 5, 4, 0xf, 1, false) MUX_CFG(DA830, EMB_D_1, 5, 8, 0xf, 1, false) MUX_CFG(DA830, EMB_D_2, 5, 12, 0xf, 1, false) MUX_CFG(DA830, EMB_D_3, 5, 16, 0xf, 1, false) MUX_CFG(DA830, EMB_D_4, 5, 20, 0xf, 1, false) MUX_CFG(DA830, EMB_D_5, 5, 24, 0xf, 1, false) 
MUX_CFG(DA830, EMB_D_6, 5, 28, 0xf, 1, false) MUX_CFG(DA830, GPIO6_0, 5, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO6_1, 5, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO6_2, 5, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO6_3, 5, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO6_4, 5, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO6_5, 5, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO6_6, 5, 28, 0xf, 8, false) MUX_CFG(DA830, EMB_D_7, 6, 0, 0xf, 1, false) MUX_CFG(DA830, EMB_D_8, 6, 4, 0xf, 1, false) MUX_CFG(DA830, EMB_D_9, 6, 8, 0xf, 1, false) MUX_CFG(DA830, EMB_D_10, 6, 12, 0xf, 1, false) MUX_CFG(DA830, EMB_D_11, 6, 16, 0xf, 1, false) MUX_CFG(DA830, EMB_D_12, 6, 20, 0xf, 1, false) MUX_CFG(DA830, EMB_D_13, 6, 24, 0xf, 1, false) MUX_CFG(DA830, EMB_D_14, 6, 28, 0xf, 1, false) MUX_CFG(DA830, GPIO6_7, 6, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO6_8, 6, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO6_9, 6, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO6_10, 6, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO6_11, 6, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO6_12, 6, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO6_13, 6, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO6_14, 6, 28, 0xf, 8, false) MUX_CFG(DA830, EMB_D_15, 7, 0, 0xf, 1, false) MUX_CFG(DA830, NEMB_WE_DQM_1, 7, 4, 0xf, 1, false) MUX_CFG(DA830, NEMB_WE_DQM_0, 7, 8, 0xf, 1, false) MUX_CFG(DA830, SPI0_SOMI_0, 7, 12, 0xf, 1, false) MUX_CFG(DA830, SPI0_SIMO_0, 7, 16, 0xf, 1, false) MUX_CFG(DA830, SPI0_CLK, 7, 20, 0xf, 1, false) MUX_CFG(DA830, NSPI0_ENA, 7, 24, 0xf, 1, false) MUX_CFG(DA830, NSPI0_SCS_0, 7, 28, 0xf, 1, false) MUX_CFG(DA830, EQEP0I, 7, 12, 0xf, 2, false) MUX_CFG(DA830, EQEP0S, 7, 16, 0xf, 2, false) MUX_CFG(DA830, EQEP1I, 7, 20, 0xf, 2, false) MUX_CFG(DA830, NUART0_CTS, 7, 24, 0xf, 2, false) MUX_CFG(DA830, NUART0_RTS, 7, 28, 0xf, 2, false) MUX_CFG(DA830, EQEP0A, 7, 24, 0xf, 4, false) MUX_CFG(DA830, EQEP0B, 7, 28, 0xf, 4, false) MUX_CFG(DA830, GPIO6_15, 7, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO5_14, 7, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO5_15, 7, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO5_0, 7, 12, 0xf, 8, 
false) MUX_CFG(DA830, GPIO5_1, 7, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO5_2, 7, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO5_3, 7, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO5_4, 7, 28, 0xf, 8, false) MUX_CFG(DA830, SPI1_SOMI_0, 8, 0, 0xf, 1, false) MUX_CFG(DA830, SPI1_SIMO_0, 8, 4, 0xf, 1, false) MUX_CFG(DA830, SPI1_CLK, 8, 8, 0xf, 1, false) MUX_CFG(DA830, UART0_RXD, 8, 12, 0xf, 1, false) MUX_CFG(DA830, UART0_TXD, 8, 16, 0xf, 1, false) MUX_CFG(DA830, AXR1_10, 8, 20, 0xf, 1, false) MUX_CFG(DA830, AXR1_11, 8, 24, 0xf, 1, false) MUX_CFG(DA830, NSPI1_ENA, 8, 28, 0xf, 1, false) MUX_CFG(DA830, I2C1_SCL, 8, 0, 0xf, 2, false) MUX_CFG(DA830, I2C1_SDA, 8, 4, 0xf, 2, false) MUX_CFG(DA830, EQEP1S, 8, 8, 0xf, 2, false) MUX_CFG(DA830, I2C0_SDA, 8, 12, 0xf, 2, false) MUX_CFG(DA830, I2C0_SCL, 8, 16, 0xf, 2, false) MUX_CFG(DA830, UART2_RXD, 8, 28, 0xf, 2, false) MUX_CFG(DA830, TM64P0_IN12, 8, 12, 0xf, 4, false) MUX_CFG(DA830, TM64P0_OUT12, 8, 16, 0xf, 4, false) MUX_CFG(DA830, GPIO5_5, 8, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO5_6, 8, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO5_7, 8, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO5_8, 8, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO5_9, 8, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO5_10, 8, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO5_11, 8, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO5_12, 8, 28, 0xf, 8, false) MUX_CFG(DA830, NSPI1_SCS_0, 9, 0, 0xf, 1, false) MUX_CFG(DA830, USB0_DRVVBUS, 9, 4, 0xf, 1, false) MUX_CFG(DA830, AHCLKX0, 9, 8, 0xf, 1, false) MUX_CFG(DA830, ACLKX0, 9, 12, 0xf, 1, false) MUX_CFG(DA830, AFSX0, 9, 16, 0xf, 1, false) MUX_CFG(DA830, AHCLKR0, 9, 20, 0xf, 1, false) MUX_CFG(DA830, ACLKR0, 9, 24, 0xf, 1, false) MUX_CFG(DA830, AFSR0, 9, 28, 0xf, 1, false) MUX_CFG(DA830, UART2_TXD, 9, 0, 0xf, 2, false) MUX_CFG(DA830, AHCLKX2, 9, 8, 0xf, 2, false) MUX_CFG(DA830, ECAP0_APWM0, 9, 12, 0xf, 2, false) MUX_CFG(DA830, RMII_MHZ_50_CLK, 9, 20, 0xf, 2, false) MUX_CFG(DA830, ECAP1_APWM1, 9, 24, 0xf, 2, false) MUX_CFG(DA830, USB_REFCLKIN, 9, 8, 0xf, 4, false) MUX_CFG(DA830, 
GPIO5_13, 9, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO4_15, 9, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO2_11, 9, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO2_12, 9, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO2_13, 9, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO2_14, 9, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO2_15, 9, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO3_12, 9, 28, 0xf, 8, false) MUX_CFG(DA830, AMUTE0, 10, 0, 0xf, 1, false) MUX_CFG(DA830, AXR0_0, 10, 4, 0xf, 1, false) MUX_CFG(DA830, AXR0_1, 10, 8, 0xf, 1, false) MUX_CFG(DA830, AXR0_2, 10, 12, 0xf, 1, false) MUX_CFG(DA830, AXR0_3, 10, 16, 0xf, 1, false) MUX_CFG(DA830, AXR0_4, 10, 20, 0xf, 1, false) MUX_CFG(DA830, AXR0_5, 10, 24, 0xf, 1, false) MUX_CFG(DA830, AXR0_6, 10, 28, 0xf, 1, false) MUX_CFG(DA830, RMII_TXD_0, 10, 4, 0xf, 2, false) MUX_CFG(DA830, RMII_TXD_1, 10, 8, 0xf, 2, false) MUX_CFG(DA830, RMII_TXEN, 10, 12, 0xf, 2, false) MUX_CFG(DA830, RMII_CRS_DV, 10, 16, 0xf, 2, false) MUX_CFG(DA830, RMII_RXD_0, 10, 20, 0xf, 2, false) MUX_CFG(DA830, RMII_RXD_1, 10, 24, 0xf, 2, false) MUX_CFG(DA830, RMII_RXER, 10, 28, 0xf, 2, false) MUX_CFG(DA830, AFSR2, 10, 4, 0xf, 4, false) MUX_CFG(DA830, ACLKX2, 10, 8, 0xf, 4, false) MUX_CFG(DA830, AXR2_3, 10, 12, 0xf, 4, false) MUX_CFG(DA830, AXR2_2, 10, 16, 0xf, 4, false) MUX_CFG(DA830, AXR2_1, 10, 20, 0xf, 4, false) MUX_CFG(DA830, AFSX2, 10, 24, 0xf, 4, false) MUX_CFG(DA830, ACLKR2, 10, 28, 0xf, 4, false) MUX_CFG(DA830, NRESETOUT, 10, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO3_0, 10, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO3_1, 10, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO3_2, 10, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO3_3, 10, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO3_4, 10, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO3_5, 10, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO3_6, 10, 28, 0xf, 8, false) MUX_CFG(DA830, AXR0_7, 11, 0, 0xf, 1, false) MUX_CFG(DA830, AXR0_8, 11, 4, 0xf, 1, false) MUX_CFG(DA830, UART1_RXD, 11, 8, 0xf, 1, false) MUX_CFG(DA830, UART1_TXD, 11, 12, 0xf, 1, false) MUX_CFG(DA830, AXR0_11, 11, 16, 0xf, 1, false) 
MUX_CFG(DA830, AHCLKX1, 11, 20, 0xf, 1, false) MUX_CFG(DA830, ACLKX1, 11, 24, 0xf, 1, false) MUX_CFG(DA830, AFSX1, 11, 28, 0xf, 1, false) MUX_CFG(DA830, MDIO_CLK, 11, 0, 0xf, 2, false) MUX_CFG(DA830, MDIO_D, 11, 4, 0xf, 2, false) MUX_CFG(DA830, AXR0_9, 11, 8, 0xf, 2, false) MUX_CFG(DA830, AXR0_10, 11, 12, 0xf, 2, false) MUX_CFG(DA830, EPWM0B, 11, 20, 0xf, 2, false) MUX_CFG(DA830, EPWM0A, 11, 24, 0xf, 2, false) MUX_CFG(DA830, EPWMSYNCI, 11, 28, 0xf, 2, false) MUX_CFG(DA830, AXR2_0, 11, 16, 0xf, 4, false) MUX_CFG(DA830, EPWMSYNC0, 11, 28, 0xf, 4, false) MUX_CFG(DA830, GPIO3_7, 11, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO3_8, 11, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO3_9, 11, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO3_10, 11, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO3_11, 11, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO3_14, 11, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO3_15, 11, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO4_10, 11, 28, 0xf, 8, false) MUX_CFG(DA830, AHCLKR1, 12, 0, 0xf, 1, false) MUX_CFG(DA830, ACLKR1, 12, 4, 0xf, 1, false) MUX_CFG(DA830, AFSR1, 12, 8, 0xf, 1, false) MUX_CFG(DA830, AMUTE1, 12, 12, 0xf, 1, false) MUX_CFG(DA830, AXR1_0, 12, 16, 0xf, 1, false) MUX_CFG(DA830, AXR1_1, 12, 20, 0xf, 1, false) MUX_CFG(DA830, AXR1_2, 12, 24, 0xf, 1, false) MUX_CFG(DA830, AXR1_3, 12, 28, 0xf, 1, false) MUX_CFG(DA830, ECAP2_APWM2, 12, 4, 0xf, 2, false) MUX_CFG(DA830, EHRPWMGLUETZ, 12, 12, 0xf, 2, false) MUX_CFG(DA830, EQEP1A, 12, 28, 0xf, 2, false) MUX_CFG(DA830, GPIO4_11, 12, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO4_12, 12, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO4_13, 12, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO4_14, 12, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO4_0, 12, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO4_1, 12, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO4_2, 12, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO4_3, 12, 28, 0xf, 8, false) MUX_CFG(DA830, AXR1_4, 13, 0, 0xf, 1, false) MUX_CFG(DA830, AXR1_5, 13, 4, 0xf, 1, false) MUX_CFG(DA830, AXR1_6, 13, 8, 0xf, 1, false) MUX_CFG(DA830, AXR1_7, 13, 12, 0xf, 1, 
false) MUX_CFG(DA830, AXR1_8, 13, 16, 0xf, 1, false) MUX_CFG(DA830, AXR1_9, 13, 20, 0xf, 1, false) MUX_CFG(DA830, EMA_D_0, 13, 24, 0xf, 1, false) MUX_CFG(DA830, EMA_D_1, 13, 28, 0xf, 1, false) MUX_CFG(DA830, EQEP1B, 13, 0, 0xf, 2, false) MUX_CFG(DA830, EPWM2B, 13, 4, 0xf, 2, false) MUX_CFG(DA830, EPWM2A, 13, 8, 0xf, 2, false) MUX_CFG(DA830, EPWM1B, 13, 12, 0xf, 2, false) MUX_CFG(DA830, EPWM1A, 13, 16, 0xf, 2, false) MUX_CFG(DA830, MMCSD_DAT_0, 13, 24, 0xf, 2, false) MUX_CFG(DA830, MMCSD_DAT_1, 13, 28, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_0, 13, 24, 0xf, 4, false) MUX_CFG(DA830, UHPI_HD_1, 13, 28, 0xf, 4, false) MUX_CFG(DA830, GPIO4_4, 13, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO4_5, 13, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO4_6, 13, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO4_7, 13, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO4_8, 13, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO4_9, 13, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO0_0, 13, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO0_1, 13, 28, 0xf, 8, false) MUX_CFG(DA830, EMA_D_2, 14, 0, 0xf, 1, false) MUX_CFG(DA830, EMA_D_3, 14, 4, 0xf, 1, false) MUX_CFG(DA830, EMA_D_4, 14, 8, 0xf, 1, false) MUX_CFG(DA830, EMA_D_5, 14, 12, 0xf, 1, false) MUX_CFG(DA830, EMA_D_6, 14, 16, 0xf, 1, false) MUX_CFG(DA830, EMA_D_7, 14, 20, 0xf, 1, false) MUX_CFG(DA830, EMA_D_8, 14, 24, 0xf, 1, false) MUX_CFG(DA830, EMA_D_9, 14, 28, 0xf, 1, false) MUX_CFG(DA830, MMCSD_DAT_2, 14, 0, 0xf, 2, false) MUX_CFG(DA830, MMCSD_DAT_3, 14, 4, 0xf, 2, false) MUX_CFG(DA830, MMCSD_DAT_4, 14, 8, 0xf, 2, false) MUX_CFG(DA830, MMCSD_DAT_5, 14, 12, 0xf, 2, false) MUX_CFG(DA830, MMCSD_DAT_6, 14, 16, 0xf, 2, false) MUX_CFG(DA830, MMCSD_DAT_7, 14, 20, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_8, 14, 24, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_9, 14, 28, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_2, 14, 0, 0xf, 4, false) MUX_CFG(DA830, UHPI_HD_3, 14, 4, 0xf, 4, false) MUX_CFG(DA830, UHPI_HD_4, 14, 8, 0xf, 4, false) MUX_CFG(DA830, UHPI_HD_5, 14, 12, 0xf, 4, false) MUX_CFG(DA830, UHPI_HD_6, 14, 16, 0xf, 
4, false) MUX_CFG(DA830, UHPI_HD_7, 14, 20, 0xf, 4, false) MUX_CFG(DA830, LCD_D_8, 14, 24, 0xf, 4, false) MUX_CFG(DA830, LCD_D_9, 14, 28, 0xf, 4, false) MUX_CFG(DA830, GPIO0_2, 14, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO0_3, 14, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO0_4, 14, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO0_5, 14, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO0_6, 14, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO0_7, 14, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO0_8, 14, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO0_9, 14, 28, 0xf, 8, false) MUX_CFG(DA830, EMA_D_10, 15, 0, 0xf, 1, false) MUX_CFG(DA830, EMA_D_11, 15, 4, 0xf, 1, false) MUX_CFG(DA830, EMA_D_12, 15, 8, 0xf, 1, false) MUX_CFG(DA830, EMA_D_13, 15, 12, 0xf, 1, false) MUX_CFG(DA830, EMA_D_14, 15, 16, 0xf, 1, false) MUX_CFG(DA830, EMA_D_15, 15, 20, 0xf, 1, false) MUX_CFG(DA830, EMA_A_0, 15, 24, 0xf, 1, false) MUX_CFG(DA830, EMA_A_1, 15, 28, 0xf, 1, false) MUX_CFG(DA830, UHPI_HD_10, 15, 0, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_11, 15, 4, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_12, 15, 8, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_13, 15, 12, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_14, 15, 16, 0xf, 2, false) MUX_CFG(DA830, UHPI_HD_15, 15, 20, 0xf, 2, false) MUX_CFG(DA830, LCD_D_7, 15, 24, 0xf, 2, false) MUX_CFG(DA830, MMCSD_CLK, 15, 28, 0xf, 2, false) MUX_CFG(DA830, LCD_D_10, 15, 0, 0xf, 4, false) MUX_CFG(DA830, LCD_D_11, 15, 4, 0xf, 4, false) MUX_CFG(DA830, LCD_D_12, 15, 8, 0xf, 4, false) MUX_CFG(DA830, LCD_D_13, 15, 12, 0xf, 4, false) MUX_CFG(DA830, LCD_D_14, 15, 16, 0xf, 4, false) MUX_CFG(DA830, LCD_D_15, 15, 20, 0xf, 4, false) MUX_CFG(DA830, UHPI_HCNTL0, 15, 28, 0xf, 4, false) MUX_CFG(DA830, GPIO0_10, 15, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO0_11, 15, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO0_12, 15, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO0_13, 15, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO0_14, 15, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO0_15, 15, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO1_0, 15, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO1_1, 15, 28, 
0xf, 8, false) MUX_CFG(DA830, EMA_A_2, 16, 0, 0xf, 1, false) MUX_CFG(DA830, EMA_A_3, 16, 4, 0xf, 1, false) MUX_CFG(DA830, EMA_A_4, 16, 8, 0xf, 1, false) MUX_CFG(DA830, EMA_A_5, 16, 12, 0xf, 1, false) MUX_CFG(DA830, EMA_A_6, 16, 16, 0xf, 1, false) MUX_CFG(DA830, EMA_A_7, 16, 20, 0xf, 1, false) MUX_CFG(DA830, EMA_A_8, 16, 24, 0xf, 1, false) MUX_CFG(DA830, EMA_A_9, 16, 28, 0xf, 1, false) MUX_CFG(DA830, MMCSD_CMD, 16, 0, 0xf, 2, false) MUX_CFG(DA830, LCD_D_6, 16, 4, 0xf, 2, false) MUX_CFG(DA830, LCD_D_3, 16, 8, 0xf, 2, false) MUX_CFG(DA830, LCD_D_2, 16, 12, 0xf, 2, false) MUX_CFG(DA830, LCD_D_1, 16, 16, 0xf, 2, false) MUX_CFG(DA830, LCD_D_0, 16, 20, 0xf, 2, false) MUX_CFG(DA830, LCD_PCLK, 16, 24, 0xf, 2, false) MUX_CFG(DA830, LCD_HSYNC, 16, 28, 0xf, 2, false) MUX_CFG(DA830, UHPI_HCNTL1, 16, 0, 0xf, 4, false) MUX_CFG(DA830, GPIO1_2, 16, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO1_3, 16, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO1_4, 16, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO1_5, 16, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO1_6, 16, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO1_7, 16, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO1_8, 16, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO1_9, 16, 28, 0xf, 8, false) MUX_CFG(DA830, EMA_A_10, 17, 0, 0xf, 1, false) MUX_CFG(DA830, EMA_A_11, 17, 4, 0xf, 1, false) MUX_CFG(DA830, EMA_A_12, 17, 8, 0xf, 1, false) MUX_CFG(DA830, EMA_BA_1, 17, 12, 0xf, 1, false) MUX_CFG(DA830, EMA_BA_0, 17, 16, 0xf, 1, false) MUX_CFG(DA830, EMA_CLK, 17, 20, 0xf, 1, false) MUX_CFG(DA830, EMA_SDCKE, 17, 24, 0xf, 1, false) MUX_CFG(DA830, NEMA_CAS, 17, 28, 0xf, 1, false) MUX_CFG(DA830, LCD_VSYNC, 17, 0, 0xf, 2, false) MUX_CFG(DA830, NLCD_AC_ENB_CS, 17, 4, 0xf, 2, false) MUX_CFG(DA830, LCD_MCLK, 17, 8, 0xf, 2, false) MUX_CFG(DA830, LCD_D_5, 17, 12, 0xf, 2, false) MUX_CFG(DA830, LCD_D_4, 17, 16, 0xf, 2, false) MUX_CFG(DA830, OBSCLK, 17, 20, 0xf, 2, false) MUX_CFG(DA830, NEMA_CS_4, 17, 28, 0xf, 2, false) MUX_CFG(DA830, UHPI_HHWIL, 17, 12, 0xf, 4, false) MUX_CFG(DA830, AHCLKR2, 17, 20, 0xf, 4, 
false) MUX_CFG(DA830, GPIO1_10, 17, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO1_11, 17, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO1_12, 17, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO1_13, 17, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO1_14, 17, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO1_15, 17, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO2_0, 17, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO2_1, 17, 28, 0xf, 8, false) MUX_CFG(DA830, NEMA_RAS, 18, 0, 0xf, 1, false) MUX_CFG(DA830, NEMA_WE, 18, 4, 0xf, 1, false) MUX_CFG(DA830, NEMA_CS_0, 18, 8, 0xf, 1, false) MUX_CFG(DA830, NEMA_CS_2, 18, 12, 0xf, 1, false) MUX_CFG(DA830, NEMA_CS_3, 18, 16, 0xf, 1, false) MUX_CFG(DA830, NEMA_OE, 18, 20, 0xf, 1, false) MUX_CFG(DA830, NEMA_WE_DQM_1, 18, 24, 0xf, 1, false) MUX_CFG(DA830, NEMA_WE_DQM_0, 18, 28, 0xf, 1, false) MUX_CFG(DA830, NEMA_CS_5, 18, 0, 0xf, 2, false) MUX_CFG(DA830, UHPI_HRNW, 18, 4, 0xf, 2, false) MUX_CFG(DA830, NUHPI_HAS, 18, 8, 0xf, 2, false) MUX_CFG(DA830, NUHPI_HCS, 18, 12, 0xf, 2, false) MUX_CFG(DA830, NUHPI_HDS1, 18, 20, 0xf, 2, false) MUX_CFG(DA830, NUHPI_HDS2, 18, 24, 0xf, 2, false) MUX_CFG(DA830, NUHPI_HINT, 18, 28, 0xf, 2, false) MUX_CFG(DA830, AXR0_12, 18, 4, 0xf, 4, false) MUX_CFG(DA830, AMUTE2, 18, 16, 0xf, 4, false) MUX_CFG(DA830, AXR0_13, 18, 20, 0xf, 4, false) MUX_CFG(DA830, AXR0_14, 18, 24, 0xf, 4, false) MUX_CFG(DA830, AXR0_15, 18, 28, 0xf, 4, false) MUX_CFG(DA830, GPIO2_2, 18, 0, 0xf, 8, false) MUX_CFG(DA830, GPIO2_3, 18, 4, 0xf, 8, false) MUX_CFG(DA830, GPIO2_4, 18, 8, 0xf, 8, false) MUX_CFG(DA830, GPIO2_5, 18, 12, 0xf, 8, false) MUX_CFG(DA830, GPIO2_6, 18, 16, 0xf, 8, false) MUX_CFG(DA830, GPIO2_7, 18, 20, 0xf, 8, false) MUX_CFG(DA830, GPIO2_8, 18, 24, 0xf, 8, false) MUX_CFG(DA830, GPIO2_9, 18, 28, 0xf, 8, false) MUX_CFG(DA830, EMA_WAIT_0, 19, 0, 0xf, 1, false) MUX_CFG(DA830, NUHPI_HRDY, 19, 0, 0xf, 2, false) MUX_CFG(DA830, GPIO2_10, 19, 0, 0xf, 8, false) #endif }; static struct map_desc da830_io_desc[] = { { .virtual = IO_VIRT, .pfn = __phys_to_pfn(IO_PHYS), .length = IO_SIZE, 
.type = MT_DEVICE }, { .virtual = DA8XX_CP_INTC_VIRT, .pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE), .length = DA8XX_CP_INTC_SIZE, .type = MT_DEVICE }, }; /* Contents of JTAG ID register used to identify exact cpu type */ static struct davinci_id da830_ids[] = { { .variant = 0x0, .part_no = 0xb7df, .manufacturer = 0x017, /* 0x02f >> 1 */ .cpu_id = DAVINCI_CPU_ID_DA830, .name = "da830/omap-l137 rev1.0", }, { .variant = 0x8, .part_no = 0xb7df, .manufacturer = 0x017, .cpu_id = DAVINCI_CPU_ID_DA830, .name = "da830/omap-l137 rev1.1", }, { .variant = 0x9, .part_no = 0xb7df, .manufacturer = 0x017, .cpu_id = DAVINCI_CPU_ID_DA830, .name = "da830/omap-l137 rev2.0", }, }; static const struct davinci_soc_info davinci_soc_info_da830 = { .io_desc = da830_io_desc, .io_desc_num = ARRAY_SIZE(da830_io_desc), .jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG, .ids = da830_ids, .ids_num = ARRAY_SIZE(da830_ids), .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120, .pinmux_pins = da830_pins, .pinmux_pins_num = ARRAY_SIZE(da830_pins), }; void __init da830_init(void) { davinci_common_init(&davinci_soc_info_da830); da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K); WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"); }
linux-master
arch/arm/mach-davinci/da830.c
// SPDX-License-Identifier: GPL-2.0-only /* * Runtime PM support code for DaVinci * * Author: Kevin Hilman * * Copyright (C) 2012 Texas Instruments, Inc. */ #include <linux/init.h> #include <linux/pm_runtime.h> #include <linux/pm_clock.h> #include <linux/platform_device.h> #include <linux/of.h> static struct dev_pm_domain davinci_pm_domain = { .ops = { USE_PM_CLK_RUNTIME_OPS USE_PLATFORM_PM_SLEEP_OPS }, }; static struct pm_clk_notifier_block platform_bus_notifier = { .pm_domain = &davinci_pm_domain, .con_ids = { "fck", "master", "slave", NULL }, }; static int __init davinci_pm_runtime_init(void) { if (of_have_populated_dt()) return 0; /* Use pm_clk as fallback if we're not using genpd. */ pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(davinci_pm_runtime_init);
linux-master
arch/arm/mach-davinci/pm_domain.c
// SPDX-License-Identifier: GPL-2.0-only /* * CPU idle for DaVinci SoCs * * Copyright (C) 2009 Texas Instruments Incorporated. https://www.ti.com/ * * Derived from Marvell Kirkwood CPU idle code * (arch/arm/mach-kirkwood/cpuidle.c) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/io.h> #include <linux/export.h> #include <asm/cpuidle.h> #include "cpuidle.h" #include "ddr2.h" #define DAVINCI_CPUIDLE_MAX_STATES 2 static void __iomem *ddr2_reg_base; static bool ddr2_pdown; static void davinci_save_ddr_power(int enter, bool pdown) { u32 val; val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET); if (enter) { if (pdown) val |= DDR2_SRPD_BIT; else val &= ~DDR2_SRPD_BIT; val |= DDR2_LPMODEN_BIT; } else { val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT); } __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET); } /* Actual code that puts the SoC in different idle states */ static __cpuidle int davinci_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { davinci_save_ddr_power(1, ddr2_pdown); cpu_do_idle(); davinci_save_ddr_power(0, ddr2_pdown); return index; } static struct cpuidle_driver davinci_idle_driver = { .name = "cpuidle-davinci", .owner = THIS_MODULE, .states[0] = ARM_CPUIDLE_WFI_STATE, .states[1] = { .enter = davinci_enter_idle, .exit_latency = 10, .target_residency = 10000, .name = "DDR SR", .desc = "WFI and DDR Self Refresh", }, .state_count = DAVINCI_CPUIDLE_MAX_STATES, }; static int __init davinci_cpuidle_probe(struct platform_device *pdev) { struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "cannot get platform data\n"); return -ENOENT; } ddr2_reg_base = pdata->ddr2_ctlr_base; ddr2_pdown = pdata->ddr2_pdown; return cpuidle_register(&davinci_idle_driver, NULL); } static struct platform_driver davinci_cpuidle_driver = { .driver = { .name = "cpuidle-davinci", }, }; static int __init davinci_cpuidle_init(void) { 
return platform_driver_probe(&davinci_cpuidle_driver, davinci_cpuidle_probe); } device_initcall(davinci_cpuidle_init);
linux-master
arch/arm/mach-davinci/cpuidle.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * Modified from mach-omap/omap2/board-generic.c */ #include <asm/mach/arch.h> #include "common.h" #include "da8xx.h" #ifdef CONFIG_ARCH_DAVINCI_DA850 static void __init da850_init_machine(void) { davinci_pm_init(); pdata_quirks_init(); } static const char *const da850_boards_compat[] __initconst = { "enbw,cmc", "ti,da850-lcdk", "ti,da850-evm", "ti,da850", NULL, }; DT_MACHINE_START(DA850_DT, "Generic DA850/OMAP-L138/AM18x") .map_io = da850_init, .init_machine = da850_init_machine, .dt_compat = da850_boards_compat, .init_late = davinci_init_late, MACHINE_END #endif
linux-master
arch/arm/mach-davinci/da8xx-dt.c
// SPDX-License-Identifier: GPL-2.0-only /* * Legacy platform_data quirks * * Copyright (C) 2016 BayLibre, Inc */ #include <linux/kernel.h> #include <linux/of.h> #include <media/i2c/tvp514x.h> #include <media/i2c/adv7343.h> #include "common.h" #include "da8xx.h" struct pdata_init { const char *compatible; void (*fn)(void); }; #define TVP5147_CH0 "tvp514x-0" #define TVP5147_CH1 "tvp514x-1" /* VPIF capture configuration */ static struct tvp514x_platform_data tvp5146_pdata = { .clk_polarity = 0, .hs_polarity = 1, .vs_polarity = 1, }; #define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL) static struct vpif_input da850_ch0_inputs[] = { { .input = { .index = 0, .name = "Composite", .type = V4L2_INPUT_TYPE_CAMERA, .capabilities = V4L2_IN_CAP_STD, .std = TVP514X_STD_ALL, }, .input_route = INPUT_CVBS_VI2B, .output_route = OUTPUT_10BIT_422_EMBEDDED_SYNC, .subdev_name = TVP5147_CH0, }, }; static struct vpif_input da850_ch1_inputs[] = { { .input = { .index = 0, .name = "S-Video", .type = V4L2_INPUT_TYPE_CAMERA, .capabilities = V4L2_IN_CAP_STD, .std = TVP514X_STD_ALL, }, .input_route = INPUT_SVIDEO_VI2C_VI1C, .output_route = OUTPUT_10BIT_422_EMBEDDED_SYNC, .subdev_name = TVP5147_CH1, }, }; static struct vpif_subdev_info da850_vpif_capture_sdev_info[] = { { .name = TVP5147_CH0, .board_info = { I2C_BOARD_INFO("tvp5146", 0x5d), .platform_data = &tvp5146_pdata, }, }, { .name = TVP5147_CH1, .board_info = { I2C_BOARD_INFO("tvp5146", 0x5c), .platform_data = &tvp5146_pdata, }, }, }; static struct vpif_capture_config da850_vpif_capture_config = { .subdev_info = da850_vpif_capture_sdev_info, .subdev_count = ARRAY_SIZE(da850_vpif_capture_sdev_info), .chan_config[0] = { .inputs = da850_ch0_inputs, .input_count = ARRAY_SIZE(da850_ch0_inputs), .vpif_if = { .if_type = VPIF_IF_BT656, .hd_pol = 1, .vd_pol = 1, .fid_pol = 0, }, }, .chan_config[1] = { .inputs = da850_ch1_inputs, .input_count = ARRAY_SIZE(da850_ch1_inputs), .vpif_if = { .if_type = VPIF_IF_BT656, .hd_pol = 1, .vd_pol = 1, .fid_pol 
= 0, }, }, .card_name = "DA850/OMAP-L138 Video Capture", }; static void __init da850_vpif_legacy_register_capture(void) { int ret; ret = da850_register_vpif_capture(&da850_vpif_capture_config); if (ret) pr_warn("%s: VPIF capture setup failed: %d\n", __func__, ret); } static void __init da850_vpif_capture_legacy_init_lcdk(void) { da850_vpif_capture_config.subdev_count = 1; da850_vpif_legacy_register_capture(); } static void __init da850_vpif_capture_legacy_init_evm(void) { da850_vpif_legacy_register_capture(); } static struct adv7343_platform_data adv7343_pdata = { .mode_config = { .dac = { 1, 1, 1 }, }, .sd_config = { .sd_dac_out = { 1 }, }, }; static struct vpif_subdev_info da850_vpif_subdev[] = { { .name = "adv7343", .board_info = { I2C_BOARD_INFO("adv7343", 0x2a), .platform_data = &adv7343_pdata, }, }, }; static const struct vpif_output da850_ch0_outputs[] = { { .output = { .index = 0, .name = "Composite", .type = V4L2_OUTPUT_TYPE_ANALOG, .capabilities = V4L2_OUT_CAP_STD, .std = V4L2_STD_ALL, }, .subdev_name = "adv7343", .output_route = ADV7343_COMPOSITE_ID, }, { .output = { .index = 1, .name = "S-Video", .type = V4L2_OUTPUT_TYPE_ANALOG, .capabilities = V4L2_OUT_CAP_STD, .std = V4L2_STD_ALL, }, .subdev_name = "adv7343", .output_route = ADV7343_SVIDEO_ID, }, }; static struct vpif_display_config da850_vpif_display_config = { .subdevinfo = da850_vpif_subdev, .subdev_count = ARRAY_SIZE(da850_vpif_subdev), .chan_config[0] = { .outputs = da850_ch0_outputs, .output_count = ARRAY_SIZE(da850_ch0_outputs), }, .card_name = "DA850/OMAP-L138 Video Display", }; static void __init da850_vpif_display_legacy_init_evm(void) { int ret; ret = da850_register_vpif_display(&da850_vpif_display_config); if (ret) pr_warn("%s: VPIF display setup failed: %d\n", __func__, ret); } static void pdata_quirks_check(struct pdata_init *quirks) { while (quirks->compatible) { if (of_machine_is_compatible(quirks->compatible)) { if (quirks->fn) quirks->fn(); } quirks++; } } static struct pdata_init 
pdata_quirks[] __initdata = { { "ti,da850-lcdk", da850_vpif_capture_legacy_init_lcdk, }, { "ti,da850-evm", da850_vpif_display_legacy_init_evm, }, { "ti,da850-evm", da850_vpif_capture_legacy_init_evm, }, { /* sentinel */ }, }; void __init pdata_quirks_init(void) { pdata_quirks_check(pdata_quirks); }
linux-master
arch/arm/mach-davinci/pdata-quirks.c
// SPDX-License-Identifier: GPL-2.0-only /* * DaVinci Power Management Routines * * Copyright (C) 2009 Texas Instruments, Inc. https://www.ti.com/ */ #include <linux/pm.h> #include <linux/suspend.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/spinlock.h> #include <asm/cacheflush.h> #include <asm/delay.h> #include <asm/io.h> #include "common.h" #include "da8xx.h" #include "mux.h" #include "pm.h" #include "clock.h" #include "psc.h" #include "sram.h" #define DA850_PLL1_BASE 0x01e1a000 #define DEEPSLEEP_SLEEPCOUNT_MASK 0xFFFF #define DEEPSLEEP_SLEEPCOUNT 128 static void (*davinci_sram_suspend) (struct davinci_pm_config *); static struct davinci_pm_config pm_config = { .sleepcount = DEEPSLEEP_SLEEPCOUNT, .ddrpsc_num = DA8XX_LPSC1_EMIF3C, }; static void davinci_sram_push(void *dest, void *src, unsigned int size) { memcpy(dest, src, size); flush_icache_range((unsigned long)dest, (unsigned long)(dest + size)); } static void davinci_pm_suspend(void) { unsigned val; if (pm_config.cpupll_reg_base != pm_config.ddrpll_reg_base) { /* Switch CPU PLL to bypass mode */ val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL); val &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN); __raw_writel(val, pm_config.cpupll_reg_base + PLLCTL); udelay(PLL_BYPASS_TIME); /* Powerdown CPU PLL */ val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL); val |= PLLCTL_PLLPWRDN; __raw_writel(val, pm_config.cpupll_reg_base + PLLCTL); } /* Configure sleep count in deep sleep register */ val = __raw_readl(pm_config.deepsleep_reg); val &= ~DEEPSLEEP_SLEEPCOUNT_MASK, val |= pm_config.sleepcount; __raw_writel(val, pm_config.deepsleep_reg); /* System goes to sleep in this call */ davinci_sram_suspend(&pm_config); if (pm_config.cpupll_reg_base != pm_config.ddrpll_reg_base) { /* put CPU PLL in reset */ val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL); val &= ~PLLCTL_PLLRST; __raw_writel(val, pm_config.cpupll_reg_base + PLLCTL); /* put CPU PLL in power down */ val = 
__raw_readl(pm_config.cpupll_reg_base + PLLCTL); val &= ~PLLCTL_PLLPWRDN; __raw_writel(val, pm_config.cpupll_reg_base + PLLCTL); /* wait for CPU PLL reset */ udelay(PLL_RESET_TIME); /* bring CPU PLL out of reset */ val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL); val |= PLLCTL_PLLRST; __raw_writel(val, pm_config.cpupll_reg_base + PLLCTL); /* Wait for CPU PLL to lock */ udelay(PLL_LOCK_TIME); /* Remove CPU PLL from bypass mode */ val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL); val &= ~PLLCTL_PLLENSRC; val |= PLLCTL_PLLEN; __raw_writel(val, pm_config.cpupll_reg_base + PLLCTL); } } static int davinci_pm_enter(suspend_state_t state) { int ret = 0; switch (state) { case PM_SUSPEND_MEM: davinci_pm_suspend(); break; default: ret = -EINVAL; } return ret; } static const struct platform_suspend_ops davinci_pm_ops = { .enter = davinci_pm_enter, .valid = suspend_valid_only_mem, }; int __init davinci_pm_init(void) { int ret; ret = davinci_cfg_reg(DA850_RTC_ALARM); if (ret) return ret; pm_config.ddr2_ctlr_base = da8xx_get_mem_ctlr(); pm_config.deepsleep_reg = DA8XX_SYSCFG1_VIRT(DA8XX_DEEPSLEEP_REG); pm_config.cpupll_reg_base = ioremap(DA8XX_PLL0_BASE, SZ_4K); if (!pm_config.cpupll_reg_base) return -ENOMEM; pm_config.ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K); if (!pm_config.ddrpll_reg_base) { ret = -ENOMEM; goto no_ddrpll_mem; } pm_config.ddrpsc_reg_base = ioremap(DA8XX_PSC1_BASE, SZ_4K); if (!pm_config.ddrpsc_reg_base) { ret = -ENOMEM; goto no_ddrpsc_mem; } davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL); if (!davinci_sram_suspend) { pr_err("PM: cannot allocate SRAM memory\n"); ret = -ENOMEM; goto no_sram_mem; } davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend, davinci_cpu_suspend_sz); suspend_set_ops(&davinci_pm_ops); return 0; no_sram_mem: iounmap(pm_config.ddrpsc_reg_base); no_ddrpsc_mem: iounmap(pm_config.ddrpll_reg_base); no_ddrpll_mem: iounmap(pm_config.cpupll_reg_base); return ret; }
linux-master
arch/arm/mach-davinci/pm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Utility to set the DAVINCI MUX register from a table in mux.h
 *
 * Author: Vladimir Barinov, MontaVista Software, Inc. <[email protected]>
 *
 * Based on linux/arch/arm/plat-omap/mux.c:
 * Copyright (C) 2003 - 2005 Nokia Corporation
 *
 * Written by Tony Lindgren
 *
 * 2007 (c) MontaVista Software, Inc.
 *
 * Copyright (C) 2008 Texas Instruments.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "mux.h"
#include "common.h"

/* Lazily established mapping of the SoC pinmux register block. */
static void __iomem *pinmux_base;

/*
 * Sets the DAVINCI MUX register based on the table
 *
 * @index: index into the SoC's pinmux_pins table (a DA850_* enum value).
 * Returns 0 on success, -ENODEV for a missing/invalid table entry,
 * -ENOMEM if the pinmux block cannot be mapped.
 */
int davinci_cfg_reg(const unsigned long index)
{
	static DEFINE_SPINLOCK(mux_spin_lock);
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	unsigned long flags;
	const struct mux_config *cfg;
	unsigned int reg_orig = 0, reg = 0;
	unsigned int mask, warn = 0;

	if (WARN_ON(!soc_info->pinmux_pins))
		return -ENODEV;

	/* Map the pinmux registers on first use. */
	if (!pinmux_base) {
		pinmux_base = ioremap(soc_info->pinmux_base, SZ_4K);
		if (WARN_ON(!pinmux_base))
			return -ENOMEM;
	}

	if (index >= soc_info->pinmux_pins_num) {
		pr_err("Invalid pin mux index: %lu (%lu)\n",
		       index, soc_info->pinmux_pins_num);
		dump_stack();
		return -ENODEV;
	}

	cfg = &soc_info->pinmux_pins[index];

	if (cfg->name == NULL) {
		pr_err("No entry for the specified index\n");
		return -ENODEV;
	}

	/*
	 * Update the mux register in question.  The read-modify-write is
	 * done under a local spinlock since several pins share one register.
	 * A pin whose previous mode differs from the requested one sets
	 * 'warn' so the change can be reported below.
	 */
	if (cfg->mask) {
		unsigned tmp1, tmp2;

		spin_lock_irqsave(&mux_spin_lock, flags);
		reg_orig = __raw_readl(pinmux_base + cfg->mux_reg);

		mask = (cfg->mask << cfg->mask_offset);
		tmp1 = reg_orig & mask;
		reg = reg_orig & ~mask;

		tmp2 = (cfg->mode << cfg->mask_offset);
		reg |= tmp2;

		if (tmp1 != tmp2)
			warn = 1;

		__raw_writel(reg, pinmux_base + cfg->mux_reg);
		spin_unlock_irqrestore(&mux_spin_lock, flags);
	}

	if (warn) {
#ifdef CONFIG_DAVINCI_MUX_WARNINGS
		pr_warn("initialized %s\n", cfg->name);
#endif
	}

#ifdef CONFIG_DAVINCI_MUX_DEBUG
	if (cfg->debug || warn) {
		pr_warn("Setting register %s\n", cfg->name);
		pr_warn("   %s (0x%08x) = 0x%08x -> 0x%08x\n",
			cfg->mux_reg_name, cfg->mux_reg, reg_orig, reg);
	}
#endif

	return 0;
}
linux-master
arch/arm/mach-davinci/mux.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI DA850/OMAP-L138 chip specific setup
 *
 * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Derived from: arch/arm/mach-davinci/da830.c
 * Original Copyrights follow:
 *
 * 2009 (c) MontaVista Software, Inc.
 */

#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#include <clocksource/timer-davinci.h>

#include <asm/mach/map.h>

#include "common.h"
#include "cputype.h"
#include "da8xx.h"
#include "hardware.h"
#include "pm.h"
#include "irqs.h"
#include "mux.h"

#define DA850_PLL1_BASE		0x01e1a000
#define DA850_TIMER64P2_BASE	0x01f0c000
#define DA850_TIMER64P3_BASE	0x01f0d000

#define DA850_REF_FREQ		24000000

/*
 * Device specific mux setup
 *
 *		soc	description	mux	mode	mode	mux	dbg
 *					reg	offset	mask	mode
 */
static const struct mux_config da850_pins[] = {
#ifdef CONFIG_DAVINCI_MUX
	/* UART0 function */
	MUX_CFG(DA850, NUART0_CTS,	3,	24,	15,	2,	false)
	MUX_CFG(DA850, NUART0_RTS,	3,	28,	15,	2,	false)
	MUX_CFG(DA850, UART0_RXD,	3,	16,	15,	2,	false)
	MUX_CFG(DA850, UART0_TXD,	3,	20,	15,	2,	false)
	/* UART1 function */
	MUX_CFG(DA850, UART1_RXD,	4,	24,	15,	2,	false)
	MUX_CFG(DA850, UART1_TXD,	4,	28,	15,	2,	false)
	/* UART2 function */
	MUX_CFG(DA850, UART2_RXD,	4,	16,	15,	2,	false)
	MUX_CFG(DA850, UART2_TXD,	4,	20,	15,	2,	false)
	/* I2C1 function */
	MUX_CFG(DA850, I2C1_SCL,	4,	16,	15,	4,	false)
	MUX_CFG(DA850, I2C1_SDA,	4,	20,	15,	4,	false)
	/* I2C0 function */
	MUX_CFG(DA850, I2C0_SDA,	4,	12,	15,	2,	false)
	MUX_CFG(DA850, I2C0_SCL,	4,	8,	15,	2,	false)
	/* EMAC function */
	MUX_CFG(DA850, MII_TXEN,	2,	4,	15,	8,	false)
	MUX_CFG(DA850, MII_TXCLK,	2,	8,	15,	8,	false)
	MUX_CFG(DA850, MII_COL,		2,	12,	15,	8,	false)
	MUX_CFG(DA850, MII_TXD_3,	2,	16,	15,	8,	false)
	MUX_CFG(DA850, MII_TXD_2,	2,	20,	15,	8,	false)
	MUX_CFG(DA850, MII_TXD_1,	2,	24,	15,	8,	false)
	MUX_CFG(DA850, MII_TXD_0,	2,	28,	15,	8,	false)
	MUX_CFG(DA850, MII_RXCLK,	3,	0,	15,	8,	false)
	MUX_CFG(DA850, MII_RXDV,	3,	4,	15,	8,	false)
	MUX_CFG(DA850, MII_RXER,	3,	8,	15,	8,	false)
	MUX_CFG(DA850, MII_CRS,		3,	12,	15,	8,	false)
	MUX_CFG(DA850, MII_RXD_3,	3,	16,	15,	8,	false)
	MUX_CFG(DA850, MII_RXD_2,	3,	20,	15,	8,	false)
	MUX_CFG(DA850, MII_RXD_1,	3,	24,	15,	8,	false)
	MUX_CFG(DA850, MII_RXD_0,	3,	28,	15,	8,	false)
	MUX_CFG(DA850, MDIO_CLK,	4,	0,	15,	8,	false)
	MUX_CFG(DA850, MDIO_D,		4,	4,	15,	8,	false)
	MUX_CFG(DA850, RMII_TXD_0,	14,	12,	15,	8,	false)
	MUX_CFG(DA850, RMII_TXD_1,	14,	8,	15,	8,	false)
	MUX_CFG(DA850, RMII_TXEN,	14,	16,	15,	8,	false)
	MUX_CFG(DA850, RMII_CRS_DV,	15,	4,	15,	8,	false)
	MUX_CFG(DA850, RMII_RXD_0,	14,	24,	15,	8,	false)
	MUX_CFG(DA850, RMII_RXD_1,	14,	20,	15,	8,	false)
	MUX_CFG(DA850, RMII_RXER,	14,	28,	15,	8,	false)
	MUX_CFG(DA850, RMII_MHZ_50_CLK,	15,	0,	15,	0,	false)
	/* McASP function */
	MUX_CFG(DA850,	ACLKR,		0,	0,	15,	1,	false)
	MUX_CFG(DA850,	ACLKX,		0,	4,	15,	1,	false)
	MUX_CFG(DA850,	AFSR,		0,	8,	15,	1,	false)
	MUX_CFG(DA850,	AFSX,		0,	12,	15,	1,	false)
	MUX_CFG(DA850,	AHCLKR,		0,	16,	15,	1,	false)
	MUX_CFG(DA850,	AHCLKX,		0,	20,	15,	1,	false)
	MUX_CFG(DA850,	AMUTE,		0,	24,	15,	1,	false)
	MUX_CFG(DA850,	AXR_15,		1,	0,	15,	1,	false)
	MUX_CFG(DA850,	AXR_14,		1,	4,	15,	1,	false)
	MUX_CFG(DA850,	AXR_13,		1,	8,	15,	1,	false)
	MUX_CFG(DA850,	AXR_12,		1,	12,	15,	1,	false)
	MUX_CFG(DA850,	AXR_11,		1,	16,	15,	1,	false)
	MUX_CFG(DA850,	AXR_10,		1,	20,	15,	1,	false)
	MUX_CFG(DA850,	AXR_9,		1,	24,	15,	1,	false)
	MUX_CFG(DA850,	AXR_8,		1,	28,	15,	1,	false)
	MUX_CFG(DA850,	AXR_7,		2,	0,	15,	1,	false)
	MUX_CFG(DA850,	AXR_6,		2,	4,	15,	1,	false)
	MUX_CFG(DA850,	AXR_5,		2,	8,	15,	1,	false)
	MUX_CFG(DA850,	AXR_4,		2,	12,	15,	1,	false)
	MUX_CFG(DA850,	AXR_3,		2,	16,	15,	1,	false)
	MUX_CFG(DA850,	AXR_2,		2,	20,	15,	1,	false)
	MUX_CFG(DA850,	AXR_1,		2,	24,	15,	1,	false)
	MUX_CFG(DA850,	AXR_0,		2,	28,	15,	1,	false)
	/* LCD function */
	MUX_CFG(DA850, LCD_D_7,		16,	8,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_6,		16,	12,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_5,		16,	16,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_4,		16,	20,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_3,		16,	24,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_2,		16,	28,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_1,		17,	0,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_0,		17,	4,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_15,	17,	8,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_14,	17,	12,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_13,	17,	16,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_12,	17,	20,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_11,	17,	24,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_10,	17,	28,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_9,		18,	0,	15,	2,	false)
	MUX_CFG(DA850, LCD_D_8,		18,	4,	15,	2,	false)
	MUX_CFG(DA850, LCD_PCLK,	18,	24,	15,	2,	false)
	MUX_CFG(DA850, LCD_HSYNC,	19,	0,	15,	2,	false)
	MUX_CFG(DA850, LCD_VSYNC,	19,	4,	15,	2,	false)
	MUX_CFG(DA850, NLCD_AC_ENB_CS,	19,	24,	15,	2,	false)
	/* MMC/SD0 function */
	MUX_CFG(DA850, MMCSD0_DAT_0,	10,	8,	15,	2,	false)
	MUX_CFG(DA850, MMCSD0_DAT_1,	10,	12,	15,	2,	false)
	MUX_CFG(DA850, MMCSD0_DAT_2,	10,	16,	15,	2,	false)
	MUX_CFG(DA850, MMCSD0_DAT_3,	10,	20,	15,	2,	false)
	MUX_CFG(DA850, MMCSD0_CLK,	10,	0,	15,	2,	false)
	MUX_CFG(DA850, MMCSD0_CMD,	10,	4,	15,	2,	false)
	/* MMC/SD1 function */
	MUX_CFG(DA850, MMCSD1_DAT_0,	18,	8,	15,	2,	false)
	MUX_CFG(DA850, MMCSD1_DAT_1,	19,	16,	15,	2,	false)
	MUX_CFG(DA850, MMCSD1_DAT_2,	19,	12,	15,	2,	false)
	MUX_CFG(DA850, MMCSD1_DAT_3,	19,	8,	15,	2,	false)
	MUX_CFG(DA850, MMCSD1_CLK,	18,	12,	15,	2,	false)
	MUX_CFG(DA850, MMCSD1_CMD,	18,	16,	15,	2,	false)
	/* EMIF2.5/EMIFA function */
	MUX_CFG(DA850, EMA_D_7,		9,	0,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_6,		9,	4,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_5,		9,	8,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_4,		9,	12,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_3,		9,	16,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_2,		9,	20,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_1,		9,	24,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_0,		9,	28,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_1,		12,	24,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_2,		12,	20,	15,	1,	false)
	MUX_CFG(DA850, NEMA_CS_3,	7,	4,	15,	1,	false)
	MUX_CFG(DA850, NEMA_CS_4,	7,	8,	15,	1,	false)
	MUX_CFG(DA850, NEMA_WE,		7,	16,	15,	1,	false)
	MUX_CFG(DA850, NEMA_OE,		7,	20,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_0,		12,	28,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_3,		12,	16,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_4,		12,	12,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_5,		12,	8,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_6,		12,	4,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_7,		12,	0,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_8,		11,	28,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_9,		11,	24,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_10,	11,	20,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_11,	11,	16,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_12,	11,	12,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_13,	11,	8,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_14,	11,	4,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_15,	11,	0,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_16,	10,	28,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_17,	10,	24,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_18,	10,	20,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_19,	10,	16,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_20,	10,	12,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_21,	10,	8,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_22,	10,	4,	15,	1,	false)
	MUX_CFG(DA850, EMA_A_23,	10,	0,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_8,		8,	28,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_9,		8,	24,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_10,	8,	20,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_11,	8,	16,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_12,	8,	12,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_13,	8,	8,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_14,	8,	4,	15,	1,	false)
	MUX_CFG(DA850, EMA_D_15,	8,	0,	15,	1,	false)
	MUX_CFG(DA850, EMA_BA_1,	5,	24,	15,	1,	false)
	MUX_CFG(DA850, EMA_CLK,		6,	0,	15,	1,	false)
	MUX_CFG(DA850, EMA_WAIT_1,	6,	24,	15,	1,	false)
	MUX_CFG(DA850, NEMA_CS_2,	7,	0,	15,	1,	false)
	/* GPIO function */
	MUX_CFG(DA850, GPIO2_4,		6,	12,	15,	8,	false)
	MUX_CFG(DA850, GPIO2_6,		6,	4,	15,	8,	false)
	MUX_CFG(DA850, GPIO2_8,		5,	28,	15,	8,	false)
	MUX_CFG(DA850, GPIO2_15,	5,	0,	15,	8,	false)
	MUX_CFG(DA850, GPIO3_12,	7,	12,	15,	8,	false)
	MUX_CFG(DA850, GPIO3_13,	7,	8,	15,	8,	false)
	MUX_CFG(DA850, GPIO4_0,		10,	28,	15,	8,	false)
	MUX_CFG(DA850, GPIO4_1,		10,	24,	15,	8,	false)
	MUX_CFG(DA850, GPIO6_9,		13,	24,	15,	8,	false)
	MUX_CFG(DA850, GPIO6_10,	13,	20,	15,	8,	false)
	MUX_CFG(DA850, GPIO6_13,	13,	8,	15,	8,	false)
	MUX_CFG(DA850, RTC_ALARM,	0,	28,	15,	2,	false)
	/* VPIF Capture */
	MUX_CFG(DA850, VPIF_DIN0,	15,	4,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN1,	15,	0,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN2,	14,	28,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN3,	14,	24,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN4,	14,	20,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN5,	14,	16,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN6,	14,	12,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN7,	14,	8,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN8,	16,	4,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN9,	16,	0,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN10,	15,	28,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN11,	15,	24,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN12,	15,	20,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN13,	15,	16,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN14,	15,	12,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DIN15,	15,	8,	15,	1,	false)
	MUX_CFG(DA850, VPIF_CLKIN0,	14,	0,	15,	1,	false)
	MUX_CFG(DA850, VPIF_CLKIN1,	14,	4,	15,	1,	false)
	MUX_CFG(DA850, VPIF_CLKIN2,	19,	8,	15,	1,	false)
	MUX_CFG(DA850, VPIF_CLKIN3,	19,	16,	15,	1,	false)
	/* VPIF Display */
	MUX_CFG(DA850, VPIF_DOUT0,	17,	4,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT1,	17,	0,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT2,	16,	28,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT3,	16,	24,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT4,	16,	20,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT5,	16,	16,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT6,	16,	12,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT7,	16,	8,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT8,	18,	4,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT9,	18,	0,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT10,	17,	28,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT11,	17,	24,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT12,	17,	20,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT13,	17,	16,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT14,	17,	12,	15,	1,	false)
	MUX_CFG(DA850, VPIF_DOUT15,	17,	8,	15,	1,	false)
	MUX_CFG(DA850, VPIF_CLKO2,	19,	12,	15,	1,	false)
	MUX_CFG(DA850, VPIF_CLKO3,	19,	20,	15,	1,	false)
#endif
};

/* Static I/O mappings: main peripheral window plus the CP_INTC block. */
static struct map_desc da850_io_desc[] = {
	{
		.virtual	= IO_VIRT,
		.pfn		= __phys_to_pfn(IO_PHYS),
		.length		= IO_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= DA8XX_CP_INTC_VIRT,
		.pfn		= __phys_to_pfn(DA8XX_CP_INTC_BASE),
		.length		= DA8XX_CP_INTC_SIZE,
		.type		= MT_DEVICE
	},
};

/* Contents of JTAG ID register used to identify exact cpu type */
static struct davinci_id da850_ids[] = {
	{
		.variant	= 0x0,
		.part_no	= 0xb7d1,
		.manufacturer	= 0x017,	/* 0x02f >> 1 */
		.cpu_id		= DAVINCI_CPU_ID_DA850,
		.name		= "da850/omap-l138",
	},
	{
		.variant	= 0x1,
		.part_no	= 0xb7d1,
		.manufacturer	= 0x017,	/* 0x02f >> 1 */
		.cpu_id		= DAVINCI_CPU_ID_DA850,
		.name		= "da850/omap-l138/am18x",
	},
};

/* VPIF resource, platform data */
static u64 da850_vpif_dma_mask = DMA_BIT_MASK(32);

static struct resource da850_vpif_display_resource[] = {
	{
		.start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
		.end   = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device da850_vpif_display_dev = {
	.name		= "vpif_display",
	.id		= -1,
	.dev		= {
		.dma_mask		= &da850_vpif_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= da850_vpif_display_resource,
	.num_resources	= ARRAY_SIZE(da850_vpif_display_resource),
};

/* Capture and display share the single VPIF interrupt line. */
static struct resource da850_vpif_capture_resource[] = {
	{
		.start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
		.end   = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
		.end   = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device da850_vpif_capture_dev = {
	.name		= "vpif_capture",
	.id		= -1,
	.dev		= {
		.dma_mask		= &da850_vpif_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= da850_vpif_capture_resource,
	.num_resources	= ARRAY_SIZE(da850_vpif_capture_resource),
};

/* Attach board-supplied display config and register the VPIF display device. */
int __init da850_register_vpif_display(struct vpif_display_config
						*display_config)
{
	da850_vpif_display_dev.dev.platform_data = display_config;
	return platform_device_register(&da850_vpif_display_dev);
}

/* Attach board-supplied capture config and register the VPIF capture device. */
int __init da850_register_vpif_capture(struct vpif_capture_config
							*capture_config)
{
	da850_vpif_capture_dev.dev.platform_data = capture_config;
	return platform_device_register(&da850_vpif_capture_dev);
}

static const struct davinci_soc_info davinci_soc_info_da850 = {
	.io_desc		= da850_io_desc,
	.io_desc_num		= ARRAY_SIZE(da850_io_desc),
	.jtag_id_reg		= DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
	.ids			= da850_ids,
	.ids_num		= ARRAY_SIZE(da850_ids),
	.pinmux_base		= DA8XX_SYSCFG0_BASE + 0x120,
	.pinmux_pins		= da850_pins,
	.pinmux_pins_num	= ARRAY_SIZE(da850_pins),
	.sram_dma		= DA8XX_SHARED_RAM_BASE,
	.sram_len		= SZ_128K,
};

/* SoC entry point: common init, then map the two SYSCFG register blocks. */
void __init da850_init(void)
{
	davinci_common_init(&davinci_soc_info_da850);

	da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
	if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"))
		return;

	da8xx_syscfg1_base = ioremap(DA8XX_SYSCFG1_BASE, SZ_4K);
	WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module");
}
linux-master
arch/arm/mach-davinci/da850.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * DA8XX/OMAP L1XX platform device data * * Copyright (c) 2007-2009, MontaVista Software, Inc. <[email protected]> * Derived from code that was: * Copyright (C) 2006 Komal Shah <[email protected]> */ #include <linux/ahci_platform.h> #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/dma-map-ops.h> #include <linux/dmaengine.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/serial_8250.h> #include "common.h" #include "cputype.h" #include "da8xx.h" #include "cpuidle.h" #include "irqs.h" #include "sram.h" #define DA8XX_TPCC_BASE 0x01c00000 #define DA8XX_TPTC0_BASE 0x01c08000 #define DA8XX_TPTC1_BASE 0x01c08400 #define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */ #define DA8XX_I2C0_BASE 0x01c22000 #define DA8XX_RTC_BASE 0x01c23000 #define DA8XX_PRUSS_MEM_BASE 0x01c30000 #define DA8XX_MMCSD0_BASE 0x01c40000 #define DA8XX_SPI0_BASE 0x01c41000 #define DA830_SPI1_BASE 0x01e12000 #define DA8XX_LCD_CNTRL_BASE 0x01e13000 #define DA850_SATA_BASE 0x01e18000 #define DA850_MMCSD1_BASE 0x01e1b000 #define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000 #define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000 #define DA8XX_EMAC_CPGMAC_BASE 0x01e23000 #define DA8XX_EMAC_MDIO_BASE 0x01e24000 #define DA8XX_I2C1_BASE 0x01e28000 #define DA850_TPCC1_BASE 0x01e30000 #define DA850_TPTC2_BASE 0x01e38000 #define DA850_SPI1_BASE 0x01f0e000 #define DA8XX_DDR2_CTL_BASE 0xb0000000 #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 #define DA8XX_EMAC_RAM_OFFSET 0x0000 #define DA8XX_EMAC_CTRL_RAM_SIZE SZ_8K void __iomem *da8xx_syscfg0_base; void __iomem *da8xx_syscfg1_base; static void __iomem *da8xx_ddr2_ctlr_base; void __iomem * __init da8xx_get_mem_ctlr(void) { if (da8xx_ddr2_ctlr_base) return da8xx_ddr2_ctlr_base; da8xx_ddr2_ctlr_base = ioremap(DA8XX_DDR2_CTL_BASE, SZ_32K); if (!da8xx_ddr2_ctlr_base) pr_warn("%s: 
Unable to map DDR2 controller", __func__); return da8xx_ddr2_ctlr_base; }
linux-master
arch/arm/mach-davinci/devices-da8xx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Actions Semi Leopard * * This file is based on arm realview smp platform. * * Copyright 2012 Actions Semi Inc. * Author: Actions Semi, Inc. * * Copyright (c) 2017 Andreas Färber */ #include <linux/delay.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/smp.h> #include <linux/soc/actions/owl-sps.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> #include <trace/events/ipi.h> #define OWL_CPU1_ADDR 0x50 #define OWL_CPU1_FLAG 0x5c #define OWL_CPUx_FLAG_BOOT 0x55aa #define OWL_SPS_PG_CTL_PWR_CPU2 BIT(5) #define OWL_SPS_PG_CTL_PWR_CPU3 BIT(6) #define OWL_SPS_PG_CTL_ACK_CPU2 BIT(21) #define OWL_SPS_PG_CTL_ACK_CPU3 BIT(22) static void __iomem *scu_base_addr; static void __iomem *sps_base_addr; static void __iomem *timer_base_addr; static int ncores; static int s500_wakeup_secondary(unsigned int cpu) { int ret; if (cpu > 3) return -EINVAL; /* The generic PM domain driver is not available this early. 
*/ switch (cpu) { case 2: ret = owl_sps_set_pg(sps_base_addr, OWL_SPS_PG_CTL_PWR_CPU2, OWL_SPS_PG_CTL_ACK_CPU2, true); if (ret) return ret; break; case 3: ret = owl_sps_set_pg(sps_base_addr, OWL_SPS_PG_CTL_PWR_CPU3, OWL_SPS_PG_CTL_ACK_CPU3, true); if (ret) return ret; break; } /* wait for CPUx to run to WFE instruction */ udelay(200); writel(__pa_symbol(secondary_startup), timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4); writel(OWL_CPUx_FLAG_BOOT, timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4); dsb_sev(); mb(); return 0; } static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle) { int ret; ret = s500_wakeup_secondary(cpu); if (ret) return ret; udelay(10); smp_send_reschedule(cpu); writel(0, timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4); writel(0, timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4); return 0; } static void __init s500_smp_prepare_cpus(unsigned int max_cpus) { struct device_node *node; node = of_find_compatible_node(NULL, NULL, "actions,s500-timer"); if (!node) { pr_err("%s: missing timer\n", __func__); return; } timer_base_addr = of_iomap(node, 0); if (!timer_base_addr) { pr_err("%s: could not map timer registers\n", __func__); return; } node = of_find_compatible_node(NULL, NULL, "actions,s500-sps"); if (!node) { pr_err("%s: missing sps\n", __func__); return; } sps_base_addr = of_iomap(node, 0); if (!sps_base_addr) { pr_err("%s: could not map sps registers\n", __func__); return; } if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); if (!node) { pr_err("%s: missing scu\n", __func__); return; } scu_base_addr = of_iomap(node, 0); if (!scu_base_addr) { pr_err("%s: could not map scu registers\n", __func__); return; } /* * While the number of cpus is gathered from dt, also get the * number of cores from the scu to verify this value when * booting the cores. 
*/ ncores = scu_get_core_count(scu_base_addr); pr_debug("%s: ncores %d\n", __func__, ncores); scu_enable(scu_base_addr); } } static const struct smp_operations s500_smp_ops __initconst = { .smp_prepare_cpus = s500_smp_prepare_cpus, .smp_boot_secondary = s500_smp_boot_secondary, }; CPU_METHOD_OF_DECLARE(s500_smp, "actions,s500-smp", &s500_smp_ops);
linux-master
arch/arm/mach-actions/platsmp.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Device Tree support for Mediatek SoCs * * Copyright (c) 2014 MundoReader S.L. * Author: Matthias Brugger <[email protected]> */ #include <linux/init.h> #include <linux/io.h> #include <asm/mach/arch.h> #include <linux/of.h> #include <linux/of_clk.h> #include <linux/clocksource.h> #define GPT6_CON_MT65xx 0x10008060 #define GPT_ENABLE 0x31 static void __init mediatek_timer_init(void) { void __iomem *gpt_base; if (of_machine_is_compatible("mediatek,mt6589") || of_machine_is_compatible("mediatek,mt7623") || of_machine_is_compatible("mediatek,mt8135") || of_machine_is_compatible("mediatek,mt8127")) { /* turn on GPT6 which ungates arch timer clocks */ gpt_base = ioremap(GPT6_CON_MT65xx, 0x04); /* enable clock and set to free-run */ writel(GPT_ENABLE, gpt_base); iounmap(gpt_base); } of_clk_init(NULL); timer_probe(); }; static const char * const mediatek_board_dt_compat[] = { "mediatek,mt2701", "mediatek,mt6589", "mediatek,mt6592", "mediatek,mt7623", "mediatek,mt7629", "mediatek,mt8127", "mediatek,mt8135", NULL, }; DT_MACHINE_START(MEDIATEK_DT, "Mediatek Cortex-A7 (Device Tree)") .dt_compat = mediatek_board_dt_compat, .init_time = mediatek_timer_init, MACHINE_END
linux-master
arch/arm/mach-mediatek/mediatek.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-mediatek/platsmp.c
 *
 * Copyright (c) 2014 Mediatek Inc.
 * Author: Shunli Wang <[email protected]>
 *         Yingjoe Chen <[email protected]>
 */
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/string.h>
#include <linux/threads.h>

#define MTK_MAX_CPU		8
#define MTK_SMP_REG_SIZE	0x1000

/*
 * Per-SoC secondary-boot protocol: a per-cpu "key" value is written to a
 * per-cpu register inside the boot-info block at smp_base; the jump
 * register receives the secondary entry point.
 */
struct mtk_smp_boot_info {
	unsigned long smp_base;
	unsigned int jump_reg;
	unsigned int core_keys[MTK_MAX_CPU - 1];
	unsigned int core_regs[MTK_MAX_CPU - 1];
};

/* TrustZone variant: boot info lives in reserved memory, not MMIO. */
static const struct mtk_smp_boot_info mtk_mt8135_tz_boot = {
	0x80002000, 0x3fc,
	{ 0x534c4131, 0x4c415332, 0x41534c33 },
	{ 0x3f8, 0x3f8, 0x3f8 },
};

static const struct mtk_smp_boot_info mtk_mt6589_boot = {
	0x10002000, 0x34,
	{ 0x534c4131, 0x4c415332, 0x41534c33 },
	{ 0x38, 0x3c, 0x40 },
};

static const struct mtk_smp_boot_info mtk_mt7623_boot = {
	0x10202000, 0x34,
	{ 0x534c4131, 0x4c415332, 0x41534c33 },
	{ 0x38, 0x3c, 0x40 },
};

static const struct of_device_id mtk_tz_smp_boot_infos[] __initconst = {
	{ .compatible   = "mediatek,mt8135", .data = &mtk_mt8135_tz_boot },
	{ .compatible   = "mediatek,mt8127", .data = &mtk_mt8135_tz_boot },
	{ .compatible   = "mediatek,mt2701", .data = &mtk_mt8135_tz_boot },
	{},
};

static const struct of_device_id mtk_smp_boot_infos[] __initconst = {
	{ .compatible   = "mediatek,mt6589", .data = &mtk_mt6589_boot },
	{ .compatible   = "mediatek,mt7623", .data = &mtk_mt7623_boot },
	{ .compatible   = "mediatek,mt7629", .data = &mtk_mt7623_boot },
	{},
};

static void __iomem *mtk_smp_base;
static const struct mtk_smp_boot_info *mtk_smp_info;

/*
 * smp_ops hook: release a secondary core by writing its key to its boot
 * register and sending a wake-up IPI.  A zero key marks an unsupported cpu.
 */
static int mtk_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (!mtk_smp_base)
		return -EINVAL;

	if (!mtk_smp_info->core_keys[cpu-1])
		return -EINVAL;

	writel_relaxed(mtk_smp_info->core_keys[cpu-1],
		mtk_smp_base + mtk_smp_info->core_regs[cpu-1]);

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	return 0;
}

/*
 * Locate the matching boot info for this SoC, map (or phys_to_virt for the
 * TrustZone variant) the boot-info block, and publish the secondary entry
 * point in the jump register.
 */
static void __init __mtk_smp_prepare_cpus(unsigned int max_cpus, int trustzone)
{
	int i, num;
	const struct of_device_id *infos;

	if (trustzone) {
		num = ARRAY_SIZE(mtk_tz_smp_boot_infos);
		infos = mtk_tz_smp_boot_infos;
	} else {
		num = ARRAY_SIZE(mtk_smp_boot_infos);
		infos = mtk_smp_boot_infos;
	}

	/* Find smp boot info for this SoC */
	for (i = 0; i < num; i++) {
		if (of_machine_is_compatible(infos[i].compatible)) {
			mtk_smp_info = infos[i].data;
			break;
		}
	}

	if (!mtk_smp_info) {
		pr_err("%s: Device is not supported\n", __func__);
		return;
	}

	if (trustzone) {
		/* smp_base(trustzone-bootinfo) is reserved by device tree */
		mtk_smp_base = phys_to_virt(mtk_smp_info->smp_base);
	} else {
		mtk_smp_base = ioremap(mtk_smp_info->smp_base, MTK_SMP_REG_SIZE);
		if (!mtk_smp_base) {
			pr_err("%s: Can't remap %lx\n", __func__,
				mtk_smp_info->smp_base);
			return;
		}
	}

	/*
	 * write the address of slave startup address into the system-wide
	 * jump register
	 */
	writel_relaxed(__pa_symbol(secondary_startup_arm),
			mtk_smp_base + mtk_smp_info->jump_reg);
}

static void __init mtk_tz_smp_prepare_cpus(unsigned int max_cpus)
{
	__mtk_smp_prepare_cpus(max_cpus, 1);
}

static void __init mtk_smp_prepare_cpus(unsigned int max_cpus)
{
	__mtk_smp_prepare_cpus(max_cpus, 0);
}

static const struct smp_operations mt81xx_tz_smp_ops __initconst = {
	.smp_prepare_cpus = mtk_tz_smp_prepare_cpus,
	.smp_boot_secondary = mtk_boot_secondary,
};
CPU_METHOD_OF_DECLARE(mt81xx_tz_smp, "mediatek,mt81xx-tz-smp", &mt81xx_tz_smp_ops);

static const struct smp_operations mt6589_smp_ops __initconst = {
	.smp_prepare_cpus = mtk_smp_prepare_cpus,
	.smp_boot_secondary = mtk_boot_secondary,
};
CPU_METHOD_OF_DECLARE(mt6589_smp, "mediatek,mt6589-smp", &mt6589_smp_ops);
linux-master
arch/arm/mach-mediatek/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Keystone2 based boards and SOC related code. * * Copyright 2013 Texas Instruments, Inc. * Cyril Chemparathy <[email protected]> * Santosh Shilimkar <[email protected]> */ #include <linux/io.h> #include <linux/dma-map-ops.h> #include <linux/init.h> #include <linux/pm_runtime.h> #include <linux/pm_clock.h> #include <linux/memblock.h> #include <linux/of.h> #include <linux/platform_device.h> #include <asm/setup.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/page.h> #define KEYSTONE_LOW_PHYS_START 0x80000000ULL #define KEYSTONE_LOW_PHYS_SIZE 0x80000000ULL /* 2G */ #define KEYSTONE_LOW_PHYS_END (KEYSTONE_LOW_PHYS_START + \ KEYSTONE_LOW_PHYS_SIZE - 1) #define KEYSTONE_HIGH_PHYS_START 0x800000000ULL #define KEYSTONE_HIGH_PHYS_SIZE 0x400000000ULL /* 16G */ #define KEYSTONE_HIGH_PHYS_END (KEYSTONE_HIGH_PHYS_START + \ KEYSTONE_HIGH_PHYS_SIZE - 1) static struct dev_pm_domain keystone_pm_domain = { .ops = { USE_PM_CLK_RUNTIME_OPS USE_PLATFORM_PM_SLEEP_OPS }, }; static struct pm_clk_notifier_block platform_domain_notifier = { .pm_domain = &keystone_pm_domain, .con_ids = { NULL }, }; static const struct of_device_id of_keystone_table[] = { {.compatible = "ti,k2hk"}, {.compatible = "ti,k2e"}, {.compatible = "ti,k2l"}, { /* end of list */ }, }; static int __init keystone_pm_runtime_init(void) { struct device_node *np; np = of_find_matching_node(NULL, of_keystone_table); if (!np) return 0; pm_clk_add_notifier(&platform_bus_type, &platform_domain_notifier); return 0; } #ifdef CONFIG_ARM_LPAE static int keystone_platform_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct device *dev = data; if (event != BUS_NOTIFY_ADD_DEVICE) return NOTIFY_DONE; if (!dev) return NOTIFY_BAD; if (!dev->of_node) { int ret = dma_direct_set_offset(dev, KEYSTONE_HIGH_PHYS_START, KEYSTONE_LOW_PHYS_START, KEYSTONE_HIGH_PHYS_SIZE); dev_err(dev, "set dma_offset%08llx%s\n", 
KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START, ret ? " failed" : ""); } return NOTIFY_OK; } static struct notifier_block platform_nb = { .notifier_call = keystone_platform_notifier, }; #endif /* CONFIG_ARM_LPAE */ static void __init keystone_init(void) { #ifdef CONFIG_ARM_LPAE if (PHYS_OFFSET >= KEYSTONE_HIGH_PHYS_START) bus_register_notifier(&platform_bus_type, &platform_nb); #endif keystone_pm_runtime_init(); } static long long __init keystone_pv_fixup(void) { long long offset; u64 mem_start, mem_end; mem_start = memblock_start_of_DRAM(); mem_end = memblock_end_of_DRAM(); /* nothing to do if we are running out of the <32-bit space */ if (mem_start >= KEYSTONE_LOW_PHYS_START && mem_end <= KEYSTONE_LOW_PHYS_END) return 0; if (mem_start < KEYSTONE_HIGH_PHYS_START || mem_end > KEYSTONE_HIGH_PHYS_END) { pr_crit("Invalid address space for memory (%08llx-%08llx)\n", mem_start, mem_end); return 0; } offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START; /* Populate the arch idmap hook */ arch_phys_to_idmap_offset = -offset; return offset; } static const char *const keystone_match[] __initconst = { "ti,k2hk", "ti,k2e", "ti,k2l", "ti,k2g", "ti,keystone", NULL, }; DT_MACHINE_START(KEYSTONE, "Keystone") #if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) .dma_zone_size = SZ_2G, #endif .init_machine = keystone_init, .dt_compat = keystone_match, .pv_fixup = keystone_pv_fixup, MACHINE_END
linux-master
arch/arm/mach-keystone/keystone.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Support for the LSI Axxia SoC devices based on ARM cores. * * Copyright (C) 2012 LSI */ #include <linux/init.h> #include <asm/mach/arch.h> static const char *const axxia_dt_match[] __initconst = { "lsi,axm5516", "lsi,axm5516-sim", "lsi,axm5516-emu", NULL }; DT_MACHINE_START(AXXIA_DT, "LSI Axxia AXM55XX") .dt_compat = axxia_dt_match, MACHINE_END
linux-master
arch/arm/mach-axxia/axxia.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-axxia/platsmp.c * * Copyright (C) 2012 LSI Corporation */ #include <linux/init.h> #include <linux/io.h> #include <linux/smp.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/cacheflush.h> /* Syscon register offsets for releasing cores from reset */ #define SC_CRIT_WRITE_KEY 0x1000 #define SC_RST_CPU_HOLD 0x1010 /* * Write the kernel entry point for secondary CPUs to the specified address */ static void write_release_addr(u32 release_phys) { u32 *virt = (u32 *) phys_to_virt(release_phys); writel_relaxed(__pa_symbol(secondary_startup), virt); /* Make sure this store is visible to other CPUs */ smp_wmb(); __cpuc_flush_dcache_area(virt, sizeof(u32)); } static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) { struct device_node *syscon_np; void __iomem *syscon; u32 tmp; syscon_np = of_find_compatible_node(NULL, NULL, "lsi,axxia-syscon"); if (!syscon_np) return -ENOENT; syscon = of_iomap(syscon_np, 0); of_node_put(syscon_np); if (!syscon) return -ENOMEM; tmp = readl(syscon + SC_RST_CPU_HOLD); writel(0xab, syscon + SC_CRIT_WRITE_KEY); tmp &= ~(1 << cpu); writel(tmp, syscon + SC_RST_CPU_HOLD); return 0; } static void __init axxia_smp_prepare_cpus(unsigned int max_cpus) { int cpu_count = 0; int cpu; /* * Initialise the present map, which describes the set of CPUs actually * populated at the present time. */ for_each_possible_cpu(cpu) { struct device_node *np; u32 release_phys; np = of_get_cpu_node(cpu, NULL); if (!np) continue; if (of_property_read_u32(np, "cpu-release-addr", &release_phys)) continue; if (cpu_count < max_cpus) { set_cpu_present(cpu, true); cpu_count++; } if (release_phys != 0) write_release_addr(release_phys); } } static const struct smp_operations axxia_smp_ops __initconst = { .smp_prepare_cpus = axxia_smp_prepare_cpus, .smp_boot_secondary = axxia_boot_secondary, }; CPU_METHOD_OF_DECLARE(axxia_smp, "lsi,syscon-release", &axxia_smp_ops);
linux-master
arch/arm/mach-axxia/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) ST-Ericsson SA 2010-2013 * Author: Rickard Andersson <[email protected]> for * ST-Ericsson. * Author: Daniel Lezcano <[email protected]> for Linaro. * Author: Ulf Hansson <[email protected]> for Linaro. */ #include <linux/kernel.h> #include <linux/irqchip/arm-gic.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/suspend.h> #include <linux/platform_data/arm-ux500-pm.h> #include <linux/of.h> #include <linux/of_address.h> /* ARM WFI Standby signal register */ #define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130) #define PRCM_ARM_WFI_STANDBY_WFI0 0x08 #define PRCM_ARM_WFI_STANDBY_WFI1 0x10 #define PRCM_IOCR (prcmu_base + 0x310) #define PRCM_IOCR_IOFORCE 0x1 /* Dual A9 core interrupt management unit registers */ #define PRCM_A9_MASK_REQ (prcmu_base + 0x328) #define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1 #define PRCM_A9_MASK_ACK (prcmu_base + 0x32c) #define PRCM_ARMITMSK31TO0 (prcmu_base + 0x11c) #define PRCM_ARMITMSK63TO32 (prcmu_base + 0x120) #define PRCM_ARMITMSK95TO64 (prcmu_base + 0x124) #define PRCM_ARMITMSK127TO96 (prcmu_base + 0x128) #define PRCM_POWER_STATE_VAL (prcmu_base + 0x25C) #define PRCM_ARMITVAL31TO0 (prcmu_base + 0x260) #define PRCM_ARMITVAL63TO32 (prcmu_base + 0x264) #define PRCM_ARMITVAL95TO64 (prcmu_base + 0x268) #define PRCM_ARMITVAL127TO96 (prcmu_base + 0x26C) static void __iomem *prcmu_base; static void __iomem *dist_base; /* This function decouple the gic from the prcmu */ int prcmu_gic_decouple(void) { u32 val = readl(PRCM_A9_MASK_REQ); /* Set bit 0 register value to 1 */ writel(val | PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ, PRCM_A9_MASK_REQ); /* Make sure the register is updated */ readl(PRCM_A9_MASK_REQ); /* Wait a few cycles for the gic mask completion */ udelay(1); return 0; } /* This function recouple the gic with the prcmu */ int prcmu_gic_recouple(void) { u32 val = readl(PRCM_A9_MASK_REQ); /* Set bit 0 register value to 0 */ writel(val & ~PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ, 
PRCM_A9_MASK_REQ); return 0; } #define PRCMU_GIC_NUMBER_REGS 5 /* * This function checks if there are pending irq on the gic. It only * makes sense if the gic has been decoupled before with the * db8500_prcmu_gic_decouple function. Disabling an interrupt only * disables the forwarding of the interrupt to any CPU interface. It * does not prevent the interrupt from changing state, for example * becoming pending, or active and pending if it is already * active. Hence, we have to check the interrupt is pending *and* is * active. */ bool prcmu_gic_pending_irq(void) { u32 pr; /* Pending register */ u32 er; /* Enable register */ int i; /* 5 registers. STI & PPI not skipped */ for (i = 0; i < PRCMU_GIC_NUMBER_REGS; i++) { pr = readl_relaxed(dist_base + GIC_DIST_PENDING_SET + i * 4); er = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); if (pr & er) return true; /* There is a pending interrupt */ } return false; } /* * This function checks if there are pending interrupt on the * prcmu which has been delegated to monitor the irqs with the * db8500_prcmu_copy_gic_settings function. */ bool prcmu_pending_irq(void) { u32 it, im; int i; for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) { it = readl(PRCM_ARMITVAL31TO0 + i * 4); im = readl(PRCM_ARMITMSK31TO0 + i * 4); if (it & im) return true; /* There is a pending interrupt */ } return false; } /* * This function checks if the specified cpu is in WFI. It's usage * makes sense only if the gic is decoupled with the db8500_prcmu_gic_decouple * function. Of course passing smp_processor_id() to this function will * always return false... */ bool prcmu_is_cpu_in_wfi(int cpu) { return readl(PRCM_ARM_WFI_STANDBY) & (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0); } /* * This function copies the gic SPI settings to the prcmu in order to * monitor them and abort/finish the retention/off sequence or state. 
*/ int prcmu_copy_gic_settings(void) { u32 er; /* Enable register */ int i; /* We skip the STI and PPI */ for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) { er = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + (i + 1) * 4); writel(er, PRCM_ARMITMSK31TO0 + i * 4); } return 0; } #ifdef CONFIG_SUSPEND static int ux500_suspend_enter(suspend_state_t state) { cpu_do_idle(); return 0; } static int ux500_suspend_valid(suspend_state_t state) { return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY; } static const struct platform_suspend_ops ux500_suspend_ops = { .enter = ux500_suspend_enter, .valid = ux500_suspend_valid, }; #define UX500_SUSPEND_OPS (&ux500_suspend_ops) #else #define UX500_SUSPEND_OPS NULL #endif void __init ux500_pm_init(u32 phy_base, u32 size) { struct device_node *np; prcmu_base = ioremap(phy_base, size); if (!prcmu_base) { pr_err("could not remap PRCMU for PM functions\n"); return; } np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic"); dist_base = of_iomap(np, 0); of_node_put(np); if (!dist_base) { pr_err("could not remap GIC dist base for PM functions\n"); return; } /* * On watchdog reboot the GIC is in some cases decoupled. * This will make sure that the GIC is correctly configured. */ prcmu_gic_recouple(); /* Set up ux500 suspend callbacks. */ suspend_set_ops(UX500_SUSPEND_OPS); }
linux-master
arch/arm/mach-ux500/pm.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2009 ST-Ericsson SA * * Author: Srinidhi KASAGAR <[email protected]> */ #include <linux/types.h> #include <linux/init.h> #include <linux/device.h> #include <linux/amba/bus.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> #include <linux/mfd/dbx500-prcmu.h> #include <linux/platform_data/arm-ux500-pm.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/regulator/machine.h> #include <asm/outercache.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> static int __init ux500_l2x0_unlock(void) { int i; struct device_node *np; void __iomem *l2x0_base; np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache"); l2x0_base = of_iomap(np, 0); of_node_put(np); if (!l2x0_base) return -ENODEV; /* * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions * apparently locks both caches before jumping to the kernel. The * l2x0 core will not touch the unlock registers if the l2x0 is * already enabled, so we do it right here instead. The PL310 has * 8 sets of registers, one per possible CPU. */ for (i = 0; i < 8; i++) { writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + i * L2X0_LOCKDOWN_STRIDE); writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + i * L2X0_LOCKDOWN_STRIDE); } iounmap(l2x0_base); return 0; } static void ux500_l2c310_write_sec(unsigned long val, unsigned reg) { /* * We can't write to secure registers as we are in non-secure * mode, until we have some SMI service available. */ } /* * FIXME: Should we set up the GPIO domain here? * * The problem is that we cannot put the interrupt resources into the platform * device until the irqdomain has been added. 
Right now, we set the GIC interrupt * domain from init_irq(), then load the gpio driver from * core_initcall(nmk_gpio_init) and add the platform devices from * arch_initcall(customize_machine). * * This feels fragile because it depends on the gpio device getting probed * _before_ any device uses the gpio interrupts. */ static void __init ux500_init_irq(void) { struct device_node *np; struct resource r; irqchip_init(); prcmu_early_init(); np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu"); of_address_to_resource(np, 0, &r); of_node_put(np); if (!r.start) { pr_err("could not find PRCMU base resource\n"); return; } ux500_pm_init(r.start, r.end-r.start); /* Unlock before init */ ux500_l2x0_unlock(); outer_cache.write_sec = ux500_l2c310_write_sec; } static void ux500_restart(enum reboot_mode mode, const char *cmd) { local_irq_disable(); local_fiq_disable(); prcmu_system_reset(0); } static const struct of_device_id u8500_local_bus_nodes[] = { /* only create devices below soc node */ { .compatible = "stericsson,db8500", }, { .compatible = "simple-bus"}, { }, }; static void __init u8500_init_machine(void) { of_platform_populate(NULL, u8500_local_bus_nodes, NULL, NULL); } static const char * stericsson_dt_platform_compat[] = { "st-ericsson,u8500", "st-ericsson,u9500", NULL, }; DT_MACHINE_START(U8500_DT, "ST-Ericsson Ux5x0 platform (Device Tree Support)") .l2c_aux_val = 0, .l2c_aux_mask = ~0, .init_irq = ux500_init_irq, .init_machine = u8500_init_machine, .dt_compat = stericsson_dt_platform_compat, .restart = ux500_restart, MACHINE_END
linux-master
arch/arm/mach-ux500/cpu-db8500.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2002 ARM Ltd. * Copyright (C) 2008 STMicroelctronics. * Copyright (C) 2009 ST-Ericsson. * Author: Srinidhi Kasagar <[email protected]> * * This file is based on arm realview platform */ #include <linux/init.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/smp.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> /* Magic triggers in backup RAM */ #define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4 #define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0 static void __iomem *backupram; static void __init ux500_smp_prepare_cpus(unsigned int max_cpus) { struct device_node *np; static void __iomem *scu_base; unsigned int ncores; int i; np = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram"); if (!np) { pr_err("No backupram base address\n"); return; } backupram = of_iomap(np, 0); of_node_put(np); if (!backupram) { pr_err("No backupram remap\n"); return; } np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); if (!np) { pr_err("No SCU base address\n"); return; } scu_base = of_iomap(np, 0); of_node_put(np); if (!scu_base) { pr_err("No SCU remap\n"); return; } scu_enable(scu_base); ncores = scu_get_core_count(scu_base); for (i = 0; i < ncores; i++) set_cpu_possible(i, true); iounmap(scu_base); } static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) { /* * write the address of secondary startup into the backup ram register * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the * backup ram register at offset 0x1FF0, which is what boot rom code * is waiting for. This will wake up the secondary core from WFE. 
*/ writel(__pa_symbol(secondary_startup), backupram + UX500_CPU1_JUMPADDR_OFFSET); writel(0xA1FEED01, backupram + UX500_CPU1_WAKEMAGIC_OFFSET); /* make sure write buffer is drained */ mb(); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); return 0; } #ifdef CONFIG_HOTPLUG_CPU static void ux500_cpu_die(unsigned int cpu) { wfi(); } #endif static const struct smp_operations ux500_smp_ops __initconst = { .smp_prepare_cpus = ux500_smp_prepare_cpus, .smp_boot_secondary = ux500_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = ux500_cpu_die, #endif }; CPU_METHOD_OF_DECLARE(ux500_smp, "ste,dbx500-smp", &ux500_smp_ops);
linux-master
arch/arm/mach-ux500/platsmp.c
// SPDX-License-Identifier: GPL-2.0 // // Copyright (C) 2012 Samsung Electronics. // Kyungmin Park <[email protected]> // Tomasz Figa <[email protected]> #include <linux/kernel.h> #include <linux/io.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <asm/firmware.h> #include <asm/hardware/cache-l2x0.h> #include <asm/suspend.h> #include "common.h" #include "smc.h" #define EXYNOS_BOOT_ADDR 0x8 #define EXYNOS_BOOT_FLAG 0xc static void exynos_save_cp15(void) { /* Save Power control and Diagnostic registers */ asm ("mrc p15, 0, %0, c15, c0, 0\n" "mrc p15, 0, %1, c15, c0, 1\n" : "=r" (cp15_save_power), "=r" (cp15_save_diag) : : "cc"); } static int exynos_do_idle(unsigned long mode) { switch (mode) { case FW_DO_IDLE_AFTR: if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) exynos_save_cp15(); writel_relaxed(__pa_symbol(exynos_cpu_resume_ns), sysram_ns_base_addr + 0x24); writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20); if (soc_is_exynos3250()) { flush_cache_all(); exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE, SMC_POWERSTATE_IDLE, 0); exynos_smc(SMC_CMD_SHUTDOWN, OP_TYPE_CLUSTER, SMC_POWERSTATE_IDLE, 0); } else exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0); break; case FW_DO_IDLE_SLEEP: exynos_smc(SMC_CMD_SLEEP, 0, 0, 0); } return 0; } static int exynos_cpu_boot(int cpu) { /* * Exynos3250 doesn't need to send smc command for secondary CPU boot * because Exynos3250 removes WFE in secure mode. * * On Exynos5 devices the call is ignored by trustzone firmware. */ if (!soc_is_exynos4210() && !soc_is_exynos4212() && !soc_is_exynos4412()) return 0; /* * The second parameter of SMC_CMD_CPU1BOOT command means CPU id. * But, Exynos4212 has only one secondary CPU so second parameter * isn't used for informing secure firmware about CPU id. 
*/ if (soc_is_exynos4212()) cpu = 0; exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0); return 0; } static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr) { void __iomem *boot_reg; if (!sysram_ns_base_addr) return -ENODEV; boot_reg = sysram_ns_base_addr + 0x1c; /* * Almost all Exynos-series of SoCs that run in secure mode don't need * additional offset for every CPU, with Exynos4412 being the only * exception. */ if (soc_is_exynos4412()) boot_reg += 4 * cpu; writel_relaxed(boot_addr, boot_reg); return 0; } static int exynos_get_cpu_boot_addr(int cpu, unsigned long *boot_addr) { void __iomem *boot_reg; if (!sysram_ns_base_addr) return -ENODEV; boot_reg = sysram_ns_base_addr + 0x1c; if (soc_is_exynos4412()) boot_reg += 4 * cpu; *boot_addr = readl_relaxed(boot_reg); return 0; } static int exynos_cpu_suspend(unsigned long arg) { flush_cache_all(); outer_flush_all(); exynos_smc(SMC_CMD_SLEEP, 0, 0, 0); pr_info("Failed to suspend the system\n"); writel(0, sysram_ns_base_addr + EXYNOS_BOOT_FLAG); return 1; } static int exynos_suspend(void) { if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) exynos_save_cp15(); writel(EXYNOS_SLEEP_MAGIC, sysram_ns_base_addr + EXYNOS_BOOT_FLAG); writel(__pa_symbol(exynos_cpu_resume_ns), sysram_ns_base_addr + EXYNOS_BOOT_ADDR); return cpu_suspend(0, exynos_cpu_suspend); } static int exynos_resume(void) { writel(0, sysram_ns_base_addr + EXYNOS_BOOT_FLAG); return 0; } static const struct firmware_ops exynos_firmware_ops = { .do_idle = IS_ENABLED(CONFIG_EXYNOS_CPU_SUSPEND) ? exynos_do_idle : NULL, .set_cpu_boot_addr = exynos_set_cpu_boot_addr, .get_cpu_boot_addr = exynos_get_cpu_boot_addr, .cpu_boot = exynos_cpu_boot, .suspend = IS_ENABLED(CONFIG_PM_SLEEP) ? exynos_suspend : NULL, .resume = IS_ENABLED(CONFIG_EXYNOS_CPU_SUSPEND) ? 
exynos_resume : NULL, }; static void exynos_l2_write_sec(unsigned long val, unsigned reg) { static int l2cache_enabled; switch (reg) { case L2X0_CTRL: if (val & L2X0_CTRL_EN) { /* * Before the cache can be enabled, due to firmware * design, SMC_CMD_L2X0INVALL must be called. */ if (!l2cache_enabled) { exynos_smc(SMC_CMD_L2X0INVALL, 0, 0, 0); l2cache_enabled = 1; } } else { l2cache_enabled = 0; } exynos_smc(SMC_CMD_L2X0CTRL, val, 0, 0); break; case L2X0_DEBUG_CTRL: exynos_smc(SMC_CMD_L2X0DEBUG, val, 0, 0); break; default: WARN_ONCE(1, "%s: ignoring write to reg 0x%x\n", __func__, reg); } } static void exynos_l2_configure(const struct l2x0_regs *regs) { exynos_smc(SMC_CMD_L2X0SETUP1, regs->tag_latency, regs->data_latency, regs->prefetch_ctrl); exynos_smc(SMC_CMD_L2X0SETUP2, regs->pwr_ctrl, regs->aux_ctrl, 0); } bool __init exynos_secure_firmware_available(void) { struct device_node *nd; const __be32 *addr; nd = of_find_compatible_node(NULL, NULL, "samsung,secure-firmware"); if (!nd) return false; addr = of_get_address(nd, 0, NULL, NULL); of_node_put(nd); if (!addr) { pr_err("%s: No address specified.\n", __func__); return false; } return true; } void __init exynos_firmware_init(void) { if (!exynos_secure_firmware_available()) return; pr_info("Running under secure firmware.\n"); register_firmware_ops(&exynos_firmware_ops); /* * Exynos 4 SoCs (based on Cortex A9 and equipped with L2C-310), * running under secure firmware, require certain registers of L2 * cache controller to be written in secure mode. Here .write_sec * callback is provided to perform necessary SMC calls. 
*/ if (IS_ENABLED(CONFIG_CACHE_L2X0) && read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { outer_cache.write_sec = exynos_l2_write_sec; outer_cache.configure = exynos_l2_configure; } } #define REG_CPU_STATE_ADDR (sysram_ns_base_addr + 0x28) #define BOOT_MODE_MASK 0x1f void exynos_set_boot_flag(unsigned int cpu, unsigned int mode) { unsigned int tmp; tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4); if (mode & BOOT_MODE_MASK) tmp &= ~BOOT_MODE_MASK; tmp |= mode; writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4); } void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode) { unsigned int tmp; tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4); tmp &= ~mode; writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4); }
linux-master
arch/arm/mach-exynos/firmware.c
// SPDX-License-Identifier: GPL-2.0 // // Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. // http://www.samsung.com // // Exynos - Suspend support // // Based on arch/arm/mach-s3c2410/pm.c // Copyright (c) 2006 Simtec Electronics // Ben Dooks <[email protected]> #include <linux/init.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/cpu_pm.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/err.h> #include <linux/regulator/machine.h> #include <linux/soc/samsung/exynos-pmu.h> #include <linux/soc/samsung/exynos-regs-pmu.h> #include <asm/cacheflush.h> #include <asm/hardware/cache-l2x0.h> #include <asm/firmware.h> #include <asm/mcpm.h> #include <asm/smp_scu.h> #include <asm/suspend.h> #include "common.h" #include "smc.h" #define REG_TABLE_END (-1U) #define EXYNOS5420_CPU_STATE 0x28 /** * struct exynos_wkup_irq - PMU IRQ to mask mapping * @hwirq: Hardware IRQ signal of the PMU * @mask: Mask in PMU wake-up mask register */ struct exynos_wkup_irq { unsigned int hwirq; u32 mask; }; struct exynos_pm_data { const struct exynos_wkup_irq *wkup_irq; unsigned int wake_disable_mask; void (*pm_prepare)(void); void (*pm_resume_prepare)(void); void (*pm_resume)(void); int (*pm_suspend)(void); int (*cpu_suspend)(unsigned long); }; /* Used only on Exynos542x/5800 */ struct exynos_pm_state { int cpu_state; unsigned int pmu_spare3; void __iomem *sysram_base; phys_addr_t sysram_phys; bool secure_firmware; }; static const struct exynos_pm_data *pm_data __ro_after_init; static struct exynos_pm_state pm_state; /* * GIC wake-up support */ static u32 exynos_irqwake_intmask = 0xffffffff; static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { { 73, BIT(1) }, /* RTC alarm */ { 74, BIT(2) }, /* RTC tick */ { /* sentinel */ }, }; static const struct exynos_wkup_irq exynos4_wkup_irq[] = { { 44, BIT(1) }, /* RTC alarm */ { 45, BIT(2) }, /* RTC tick */ { /* sentinel 
*/ }, }; static const struct exynos_wkup_irq exynos5250_wkup_irq[] = { { 43, BIT(1) }, /* RTC alarm */ { 44, BIT(2) }, /* RTC tick */ { /* sentinel */ }, }; static u32 exynos_read_eint_wakeup_mask(void) { return pmu_raw_readl(EXYNOS_EINT_WAKEUP_MASK); } static int exynos_irq_set_wake(struct irq_data *data, unsigned int state) { const struct exynos_wkup_irq *wkup_irq; if (!pm_data->wkup_irq) return -ENOENT; wkup_irq = pm_data->wkup_irq; while (wkup_irq->mask) { if (wkup_irq->hwirq == data->hwirq) { if (!state) exynos_irqwake_intmask |= wkup_irq->mask; else exynos_irqwake_intmask &= ~wkup_irq->mask; return 0; } ++wkup_irq; } return -ENOENT; } static struct irq_chip exynos_pmu_chip = { .name = "PMU", .irq_eoi = irq_chip_eoi_parent, .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_wake = exynos_irq_set_wake, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif }; static int exynos_pmu_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { if (is_of_node(fwspec->fwnode)) { if (fwspec->param_count != 3) return -EINVAL; /* No PPI should point to this domain */ if (fwspec->param[0] != 0) return -EINVAL; *hwirq = fwspec->param[1]; *type = fwspec->param[2]; return 0; } return -EINVAL; } static int exynos_pmu_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *data) { struct irq_fwspec *fwspec = data; struct irq_fwspec parent_fwspec; irq_hw_number_t hwirq; int i; if (fwspec->param_count != 3) return -EINVAL; /* Not GIC compliant */ if (fwspec->param[0] != 0) return -EINVAL; /* No PPI should point to this domain */ hwirq = fwspec->param[1]; for (i = 0; i < nr_irqs; i++) irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &exynos_pmu_chip, NULL); parent_fwspec = *fwspec; parent_fwspec.fwnode = domain->parent->fwnode; return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, 
					   &parent_fwspec);
}

static const struct irq_domain_ops exynos_pmu_domain_ops = {
	.translate	= exynos_pmu_domain_translate,
	.alloc		= exynos_pmu_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * exynos_pmu_irq_init - create the PMU IRQ domain stacked on the GIC
 * @node:   PMU device-tree node (also provides the PMU register base)
 * @parent: parent interrupt controller node (must already be probed)
 *
 * Maps the PMU registers into pmu_base_addr as a side effect, then adds
 * a hierarchical IRQ domain whose sole purpose is to intercept set_wake
 * requests (see exynos_pmu_chip).  Returns 0 on success or a negative
 * errno; on domain-creation failure the PMU mapping is torn down again.
 */
static int __init exynos_pmu_irq_init(struct device_node *node,
				      struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	pmu_base_addr = of_iomap(node, 0);

	if (!pmu_base_addr) {
		pr_err("%pOF: failed to find exynos pmu register\n", node);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, 0, node,
					  &exynos_pmu_domain_ops, NULL);
	if (!domain) {
		iounmap(pmu_base_addr);
		pmu_base_addr = NULL;
		return -ENOMEM;
	}

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the Exynos PMU platform device won't be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);

	return 0;
}

#define EXYNOS_PMU_IRQ(symbol, name)	IRQCHIP_DECLARE(symbol, name, exynos_pmu_irq_init)

EXYNOS_PMU_IRQ(exynos3250_pmu_irq, "samsung,exynos3250-pmu");
EXYNOS_PMU_IRQ(exynos4210_pmu_irq, "samsung,exynos4210-pmu");
EXYNOS_PMU_IRQ(exynos4212_pmu_irq, "samsung,exynos4212-pmu");
EXYNOS_PMU_IRQ(exynos4412_pmu_irq, "samsung,exynos4412-pmu");
EXYNOS_PMU_IRQ(exynos5250_pmu_irq, "samsung,exynos5250-pmu");
EXYNOS_PMU_IRQ(exynos5420_pmu_irq, "samsung,exynos5420-pmu");

/*
 * Final WFI of the suspend path.  If cpu_do_idle() ever returns, the
 * PMU did not power the system down, so report failure to cpu_suspend().
 */
static int exynos_cpu_do_idle(void)
{
	/* issue the standby signal into the pm unit. */
	cpu_do_idle();

	pr_info("Failed to suspend the system\n");
	return 1; /* Aborting suspend */
}

/* Flush all cache levels, including the outer (L2) cache if present. */
static void exynos_flush_cache_all(void)
{
	flush_cache_all();
	outer_flush_all();
}

/* cpu_suspend() finisher used by most SoCs: flush caches, then WFI. */
static int exynos_cpu_suspend(unsigned long arg)
{
	exynos_flush_cache_all();
	return exynos_cpu_do_idle();
}

/* Exynos3250 finisher: inner caches only (no outer-cache flush). */
static int exynos3250_cpu_suspend(unsigned long arg)
{
	flush_cache_all();
	return exynos_cpu_do_idle();
}

/*
 * Exynos5420 finisher: hand off to MCPM, which owns cache/coherency
 * teardown and the final power-down on this SoC.
 */
static int exynos5420_cpu_suspend(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	if (IS_ENABLED(CONFIG_EXYNOS_MCPM)) {
		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
		mcpm_cpu_suspend();
	}

	pr_info("Failed to suspend the system\n");

	/* return value != 0 means failure */
	return 1;
}

static void exynos_pm_set_wakeup_mask(void)
{
	/*
	 * Set wake-up mask registers
	 * EXYNOS_EINT_WAKEUP_MASK is set by pinctrl driver in late suspend.
	 * Bit 31 is always kept unmasked here.
	 */
	pmu_raw_writel(exynos_irqwake_intmask & ~BIT(31), S5P_WAKEUP_MASK);
}

static void exynos_pm_enter_sleep_mode(void)
{
	/* Set value of power down register for sleep mode */
	exynos_sys_powerdown_conf(SYS_SLEEP);
	/* Magic value in INFORM1 marks that we entered SLEEP mode. */
	pmu_raw_writel(EXYNOS_SLEEP_MAGIC, S5P_INFORM1);
}

/* Generic pre-suspend PMU programming (Exynos4/5250). */
static void exynos_pm_prepare(void)
{
	exynos_set_delayed_reset_assertion(false);

	/* Set wake-up mask registers */
	exynos_pm_set_wakeup_mask();

	exynos_pm_enter_sleep_mode();

	/* ensure at least INFORM0 has the resume address */
	pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
}

/* Exynos3250 variant: additionally disables L2 retention before sleep. */
static void exynos3250_pm_prepare(void)
{
	unsigned int tmp;

	/* Set wake-up mask registers */
	exynos_pm_set_wakeup_mask();

	tmp = pmu_raw_readl(EXYNOS3_ARM_L2_OPTION);
	tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
	pmu_raw_writel(tmp, EXYNOS3_ARM_L2_OPTION);

	exynos_pm_enter_sleep_mode();

	/* ensure at least INFORM0 has the resume address */
	pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
}

static void exynos5420_pm_prepare(void)
{
	unsigned int tmp;

	/*
Set wake-up mask registers */ exynos_pm_set_wakeup_mask(); pm_state.pmu_spare3 = pmu_raw_readl(S5P_PMU_SPARE3); /* * The cpu state needs to be saved and restored so that the * secondary CPUs will enter low power start. Though the U-Boot * is setting the cpu state with low power flag, the kernel * needs to restore it back in case, the primary cpu fails to * suspend for any reason. */ pm_state.cpu_state = readl_relaxed(pm_state.sysram_base + EXYNOS5420_CPU_STATE); writel_relaxed(0x0, pm_state.sysram_base + EXYNOS5420_CPU_STATE); if (pm_state.secure_firmware) exynos_smc(SMC_CMD_REG, SMC_REG_ID_SFR_W(pm_state.sysram_phys + EXYNOS5420_CPU_STATE), 0, 0); exynos_pm_enter_sleep_mode(); /* ensure at least INFORM0 has the resume address */ if (IS_ENABLED(CONFIG_EXYNOS_MCPM)) pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0); tmp = pmu_raw_readl(EXYNOS_L2_OPTION(0)); tmp &= ~EXYNOS_L2_USE_RETENTION; pmu_raw_writel(tmp, EXYNOS_L2_OPTION(0)); tmp = pmu_raw_readl(EXYNOS5420_SFR_AXI_CGDIS1); tmp |= EXYNOS5420_UFS; pmu_raw_writel(tmp, EXYNOS5420_SFR_AXI_CGDIS1); tmp = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION); tmp &= ~EXYNOS5420_L2RSTDISABLE_VALUE; pmu_raw_writel(tmp, EXYNOS5420_ARM_COMMON_OPTION); tmp = pmu_raw_readl(EXYNOS5420_FSYS2_OPTION); tmp |= EXYNOS5420_EMULATION; pmu_raw_writel(tmp, EXYNOS5420_FSYS2_OPTION); tmp = pmu_raw_readl(EXYNOS5420_PSGEN_OPTION); tmp |= EXYNOS5420_EMULATION; pmu_raw_writel(tmp, EXYNOS5420_PSGEN_OPTION); } static int exynos_pm_suspend(void) { exynos_pm_central_suspend(); /* Setting SEQ_OPTION register */ pmu_raw_writel(S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0, S5P_CENTRAL_SEQ_OPTION); if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) exynos_cpu_save_register(); return 0; } static int exynos5420_pm_suspend(void) { u32 this_cluster; exynos_pm_central_suspend(); /* Setting SEQ_OPTION register */ this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1); if (!this_cluster) pmu_raw_writel(EXYNOS5420_ARM_USE_STANDBY_WFI0, 
S5P_CENTRAL_SEQ_OPTION); else pmu_raw_writel(EXYNOS5420_KFC_USE_STANDBY_WFI0, S5P_CENTRAL_SEQ_OPTION); return 0; } static void exynos_pm_resume(void) { u32 cpuid = read_cpuid_part(); if (exynos_pm_central_resume()) goto early_wakeup; if (cpuid == ARM_CPU_PART_CORTEX_A9) exynos_scu_enable(); if (call_firmware_op(resume) == -ENOSYS && cpuid == ARM_CPU_PART_CORTEX_A9) exynos_cpu_restore_register(); early_wakeup: /* Clear SLEEP mode set in INFORM1 */ pmu_raw_writel(0x0, S5P_INFORM1); exynos_set_delayed_reset_assertion(true); } static void exynos3250_pm_resume(void) { u32 cpuid = read_cpuid_part(); if (exynos_pm_central_resume()) goto early_wakeup; pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION); if (call_firmware_op(resume) == -ENOSYS && cpuid == ARM_CPU_PART_CORTEX_A9) exynos_cpu_restore_register(); early_wakeup: /* Clear SLEEP mode set in INFORM1 */ pmu_raw_writel(0x0, S5P_INFORM1); } static void exynos5420_prepare_pm_resume(void) { unsigned int mpidr, cluster; mpidr = read_cpuid_mpidr(); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); if (IS_ENABLED(CONFIG_EXYNOS_MCPM)) WARN_ON(mcpm_cpu_powered_up()); if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) { /* * When system is resumed on the LITTLE/KFC core (cluster 1), * the DSCR is not properly updated until the power is turned * on also for the cluster 0. Enable it for a while to * propagate the SPNIDEN and SPIDEN signals from Secure JTAG * block and avoid undefined instruction issue on CP14 reset. 
*/ pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN, EXYNOS_COMMON_CONFIGURATION(0)); pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(0)); } } static void exynos5420_pm_resume(void) { unsigned long tmp; /* Restore the CPU0 low power state register */ tmp = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG); pmu_raw_writel(tmp | S5P_CORE_LOCAL_PWR_EN, EXYNOS5_ARM_CORE0_SYS_PWR_REG); /* Restore the sysram cpu state register */ writel_relaxed(pm_state.cpu_state, pm_state.sysram_base + EXYNOS5420_CPU_STATE); if (pm_state.secure_firmware) exynos_smc(SMC_CMD_REG, SMC_REG_ID_SFR_W(pm_state.sysram_phys + EXYNOS5420_CPU_STATE), EXYNOS_AFTR_MAGIC, 0); pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION); if (exynos_pm_central_resume()) goto early_wakeup; pmu_raw_writel(pm_state.pmu_spare3, S5P_PMU_SPARE3); early_wakeup: tmp = pmu_raw_readl(EXYNOS5420_SFR_AXI_CGDIS1); tmp &= ~EXYNOS5420_UFS; pmu_raw_writel(tmp, EXYNOS5420_SFR_AXI_CGDIS1); tmp = pmu_raw_readl(EXYNOS5420_FSYS2_OPTION); tmp &= ~EXYNOS5420_EMULATION; pmu_raw_writel(tmp, EXYNOS5420_FSYS2_OPTION); tmp = pmu_raw_readl(EXYNOS5420_PSGEN_OPTION); tmp &= ~EXYNOS5420_EMULATION; pmu_raw_writel(tmp, EXYNOS5420_PSGEN_OPTION); /* Clear SLEEP mode set in INFORM1 */ pmu_raw_writel(0x0, S5P_INFORM1); } /* * Suspend Ops */ static int exynos_suspend_enter(suspend_state_t state) { u32 eint_wakeup_mask = exynos_read_eint_wakeup_mask(); int ret; pr_debug("%s: suspending the system...\n", __func__); pr_debug("%s: wakeup masks: %08x,%08x\n", __func__, exynos_irqwake_intmask, eint_wakeup_mask); if (exynos_irqwake_intmask == -1U && eint_wakeup_mask == EXYNOS_EINT_WAKEUP_MASK_DISABLED) { pr_err("%s: No wake-up sources!\n", __func__); pr_err("%s: Aborting sleep\n", __func__); return -EINVAL; } if (pm_data->pm_prepare) pm_data->pm_prepare(); flush_cache_all(); ret = call_firmware_op(suspend); if (ret == -ENOSYS) ret = cpu_suspend(0, pm_data->cpu_suspend); if (ret) return ret; if (pm_data->pm_resume_prepare) pm_data->pm_resume_prepare(); 
	pr_debug("%s: wakeup stat: %08x\n", __func__,
			pmu_raw_readl(S5P_WAKEUP_STAT));

	pr_debug("%s: resuming the system...\n", __func__);

	return 0;
}

/* platform_suspend_ops .prepare: put regulators into their suspend state. */
static int exynos_suspend_prepare(void)
{
	int ret;

	/*
	 * REVISIT: It would be better if struct platform_suspend_ops
	 * .prepare handler get the suspend_state_t as a parameter to
	 * avoid hard-coding the suspend to mem state. It's safe to do
	 * it now only because the suspend_valid_only_mem function is
	 * used as the .valid callback used to check if a given state
	 * is supported by the platform anyways.
	 */
	ret = regulator_suspend_prepare(PM_SUSPEND_MEM);
	if (ret) {
		pr_err("Failed to prepare regulators for suspend (%d)\n", ret);
		return ret;
	}

	return 0;
}

/* platform_suspend_ops .finish: undo exynos_suspend_prepare(). */
static void exynos_suspend_finish(void)
{
	int ret;

	ret = regulator_suspend_finish();
	if (ret)
		pr_warn("Failed to resume regulators from suspend (%d)\n", ret);
}

static const struct platform_suspend_ops exynos_suspend_ops = {
	.enter		= exynos_suspend_enter,
	.prepare	= exynos_suspend_prepare,
	.finish		= exynos_suspend_finish,
	.valid		= suspend_valid_only_mem,
};

/*
 * Per-SoC suspend/resume callback tables, selected in exynos_pm_init()
 * by matching the PMU compatible string (see exynos_pmu_of_device_ids).
 */
static const struct exynos_pm_data exynos3250_pm_data = {
	.wkup_irq	= exynos3250_wkup_irq,
	.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
	.pm_suspend	= exynos_pm_suspend,
	.pm_resume	= exynos3250_pm_resume,
	.pm_prepare	= exynos3250_pm_prepare,
	.cpu_suspend	= exynos3250_cpu_suspend,
};

static const struct exynos_pm_data exynos4_pm_data = {
	.wkup_irq	= exynos4_wkup_irq,
	.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
	.pm_suspend	= exynos_pm_suspend,
	.pm_resume	= exynos_pm_resume,
	.pm_prepare	= exynos_pm_prepare,
	.cpu_suspend	= exynos_cpu_suspend,
};

static const struct exynos_pm_data exynos5250_pm_data = {
	.wkup_irq	= exynos5250_wkup_irq,
	.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
	.pm_suspend	= exynos_pm_suspend,
	.pm_resume	= exynos_pm_resume,
	.pm_prepare	= exynos_pm_prepare,
	.cpu_suspend	= exynos_cpu_suspend,
};

/* Note: Exynos5420 reuses the 5250 wake-up IRQ table. */
static const struct exynos_pm_data exynos5420_pm_data = {
	.wkup_irq	= exynos5250_wkup_irq,
	.wake_disable_mask = (0x7F << 7) | (0x1F << 1),
	.pm_resume_prepare = exynos5420_prepare_pm_resume,
	.pm_resume	= exynos5420_pm_resume,
	.pm_suspend	= exynos5420_pm_suspend,
	.pm_prepare	= exynos5420_pm_prepare,
	.cpu_suspend	= exynos5420_cpu_suspend,
};

static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
	{
		.compatible = "samsung,exynos3250-pmu",
		.data = &exynos3250_pm_data,
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = &exynos4_pm_data,
	}, {
		.compatible = "samsung,exynos4212-pmu",
		.data = &exynos4_pm_data,
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = &exynos4_pm_data,
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = &exynos5250_pm_data,
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = &exynos5420_pm_data,
	},
	{ /*sentinel*/ },
};

/* .suspend/.resume are filled in per-SoC by exynos_pm_init(). */
static struct syscore_ops exynos_pm_syscore_ops;

/*
 * exynos_pm_init - select and register the SoC's suspend/resume support
 *
 * Looks up the PMU node, picks the matching exynos_pm_data table,
 * programs the always-disabled wake-up sources, and registers both the
 * syscore ops and the platform suspend ops.  Bails out (with a warning)
 * on DTs that lack the "interrupt-controller" property on the PMU node.
 */
void __init exynos_pm_init(void)
{
	const struct of_device_id *match;
	struct device_node *np;
	u32 tmp;

	np = of_find_matching_node_and_match(NULL, exynos_pmu_of_device_ids, &match);
	if (!np) {
		pr_err("Failed to find PMU node\n");
		return;
	}

	if (WARN_ON(!of_property_read_bool(np, "interrupt-controller"))) {
		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
		of_node_put(np);
		return;
	}
	of_node_put(np);

	pm_data = (const struct exynos_pm_data *) match->data;

	/* All wakeup disable */
	tmp = pmu_raw_readl(S5P_WAKEUP_MASK);
	tmp |= pm_data->wake_disable_mask;
	pmu_raw_writel(tmp, S5P_WAKEUP_MASK);

	exynos_pm_syscore_ops.suspend	= pm_data->pm_suspend;
	exynos_pm_syscore_ops.resume	= pm_data->pm_resume;

	register_syscore_ops(&exynos_pm_syscore_ops);
	suspend_set_ops(&exynos_suspend_ops);

	/*
	 * Applicable as of now only to Exynos542x. If booted under secure
	 * firmware, the non-secure region of sysram should be used.
	 */
	if (exynos_secure_firmware_available()) {
		pm_state.sysram_phys = sysram_base_phys;
		pm_state.sysram_base = sysram_ns_base_addr;
		pm_state.secure_firmware = true;
	} else {
		pm_state.sysram_base = sysram_base_addr;
	}
}
linux-master
arch/arm/mach-exynos/suspend.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2014 Samsung Electronics Co., Ltd. // http://www.samsung.com // // Based on arch/arm/mach-vexpress/dcscb.c #include <linux/arm-cci.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/syscore_ops.h> #include <linux/soc/samsung/exynos-regs-pmu.h> #include <asm/cputype.h> #include <asm/cp15.h> #include <asm/mcpm.h> #include <asm/smp_plat.h> #include "common.h" #define EXYNOS5420_CPUS_PER_CLUSTER 4 #define EXYNOS5420_NR_CLUSTERS 2 #define EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN BIT(9) #define EXYNOS5420_USE_ARM_CORE_DOWN_STATE BIT(29) #define EXYNOS5420_USE_L2_COMMON_UP_STATE BIT(30) static void __iomem *ns_sram_base_addr __ro_after_init; static bool secure_firmware __ro_after_init; /* * The common v7_exit_coherency_flush API could not be used because of the * Erratum 799270 workaround. This macro is the same as the common one (in * arch/arm/include/asm/cacheflush.h) except for the erratum handling. 
*/ #define exynos_v7_exit_coherency_flush(level) \ asm volatile( \ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \ "bic r0, r0, #"__stringify(CR_C)"\n\t" \ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \ "isb\n\t"\ "bl v7_flush_dcache_"__stringify(level)"\n\t" \ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \ "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \ /* Dummy Load of a device register to avoid Erratum 799270 */ \ "ldr r4, [%0]\n\t" \ "and r4, r4, #0\n\t" \ "orr r0, r0, r4\n\t" \ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \ "isb\n\t" \ "dsb\n\t" \ : \ : "Ir" (pmu_base_addr + S5P_INFORM0) \ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", \ "r9", "r10", "ip", "lr", "memory") static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) { unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); bool state; pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER || cluster >= EXYNOS5420_NR_CLUSTERS) return -EINVAL; state = exynos_cpu_power_state(cpunr); exynos_cpu_power_up(cpunr); if (!state && secure_firmware) { /* * This assumes the cluster number of the big cores(Cortex A15) * is 0 and the Little cores(Cortex A7) is 1. * When the system was booted from the Little core, * they should be reset during power up cpu. */ if (cluster && cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { unsigned int timeout = 16; /* * Before we reset the Little cores, we should wait * the SPARE2 register is set to 1 because the init * codes of the iROM will set the register after * initialization. 
*/ while (timeout && !pmu_raw_readl(S5P_PMU_SPARE2)) { timeout--; udelay(10); } if (timeout == 0) { pr_err("cpu %u cluster %u powerup failed\n", cpu, cluster); exynos_cpu_power_down(cpunr); return -ETIMEDOUT; } pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu), EXYNOS_SWRESET); } } return 0; } static int exynos_cluster_powerup(unsigned int cluster) { pr_debug("%s: cluster %u\n", __func__, cluster); if (cluster >= EXYNOS5420_NR_CLUSTERS) return -EINVAL; exynos_cluster_power_up(cluster); return 0; } static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) { unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER || cluster >= EXYNOS5420_NR_CLUSTERS); exynos_cpu_power_down(cpunr); } static void exynos_cluster_powerdown_prepare(unsigned int cluster) { pr_debug("%s: cluster %u\n", __func__, cluster); BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS); exynos_cluster_power_down(cluster); } static void exynos_cpu_cache_disable(void) { /* Disable and flush the local CPU cache. */ exynos_v7_exit_coherency_flush(louis); } static void exynos_cluster_cache_disable(void) { if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) { /* * On the Cortex-A15 we need to disable * L2 prefetching before flushing the cache. */ asm volatile( "mcr p15, 1, %0, c15, c0, 3\n\t" "isb\n\t" "dsb" : : "r" (0x400)); } /* Flush all cache levels for this cluster. 
*/ exynos_v7_exit_coherency_flush(all); /* * Disable cluster-level coherency by masking * incoming snoops and DVM messages: */ cci_disable_port_by_cpu(read_cpuid_mpidr()); } static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster) { unsigned int tries = 100; unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER || cluster >= EXYNOS5420_NR_CLUSTERS); /* Wait for the core state to be OFF */ while (tries--) { if ((exynos_cpu_power_state(cpunr) == 0)) return 0; /* success: the CPU is halted */ /* Otherwise, wait and retry: */ msleep(1); } return -ETIMEDOUT; /* timeout */ } static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster) { /* especially when resuming: make sure power control is set */ exynos_cpu_powerup(cpu, cluster); } static const struct mcpm_platform_ops exynos_power_ops = { .cpu_powerup = exynos_cpu_powerup, .cluster_powerup = exynos_cluster_powerup, .cpu_powerdown_prepare = exynos_cpu_powerdown_prepare, .cluster_powerdown_prepare = exynos_cluster_powerdown_prepare, .cpu_cache_disable = exynos_cpu_cache_disable, .cluster_cache_disable = exynos_cluster_cache_disable, .wait_for_powerdown = exynos_wait_for_powerdown, .cpu_is_up = exynos_cpu_is_up, }; /* * Enable cluster-level coherency, in preparation for turning on the MMU. */ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level) { asm volatile ("\n" "cmp r0, #1\n" "bxne lr\n" "b cci_enable_port_for_self"); } static const struct of_device_id exynos_dt_mcpm_match[] = { { .compatible = "samsung,exynos5420" }, { .compatible = "samsung,exynos5800" }, {}, }; static void exynos_mcpm_setup_entry_point(void) { /* * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr * as part of secondary_cpu_start(). Let's redirect it to the * mcpm_entry_point(). This is done during both secondary boot-up as * well as system resume. 
*/ __raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */ __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */ __raw_writel(__pa_symbol(mcpm_entry_point), ns_sram_base_addr + 8); } static struct syscore_ops exynos_mcpm_syscore_ops = { .resume = exynos_mcpm_setup_entry_point, }; static int __init exynos_mcpm_init(void) { struct device_node *node; unsigned int value, i; int ret; node = of_find_matching_node(NULL, exynos_dt_mcpm_match); if (!node) return -ENODEV; of_node_put(node); if (!cci_probed()) return -ENODEV; node = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-sysram-ns"); if (!node) return -ENODEV; ns_sram_base_addr = of_iomap(node, 0); of_node_put(node); if (!ns_sram_base_addr) { pr_err("failed to map non-secure iRAM base address\n"); return -ENOMEM; } secure_firmware = exynos_secure_firmware_available(); /* * To increase the stability of KFC reset we need to program * the PMU SPARE3 register */ pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3); ret = mcpm_platform_register(&exynos_power_ops); if (!ret) ret = mcpm_sync_init(exynos_pm_power_up_setup); if (!ret) ret = mcpm_loopback(exynos_cluster_cache_disable); /* turn on the CCI */ if (ret) { iounmap(ns_sram_base_addr); return ret; } mcpm_smp_set_ops(); pr_info("Exynos MCPM support installed\n"); /* * On Exynos5420/5800 for the A15 and A7 clusters: * * EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN ensures that all the cores * in a cluster are turned off before turning off the cluster L2. * * EXYNOS5420_USE_ARM_CORE_DOWN_STATE ensures that a cores is powered * off before waking it up. * * EXYNOS5420_USE_L2_COMMON_UP_STATE ensures that cluster L2 will be * turned on before the first man is powered up. 
*/ for (i = 0; i < EXYNOS5420_NR_CLUSTERS; i++) { value = pmu_raw_readl(EXYNOS_COMMON_OPTION(i)); value |= EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN | EXYNOS5420_USE_ARM_CORE_DOWN_STATE | EXYNOS5420_USE_L2_COMMON_UP_STATE; pmu_raw_writel(value, EXYNOS_COMMON_OPTION(i)); } exynos_mcpm_setup_entry_point(); register_syscore_ops(&exynos_mcpm_syscore_ops); return ret; } early_initcall(exynos_mcpm_init);
linux-master
arch/arm/mach-exynos/mcpm-exynos.c
// SPDX-License-Identifier: GPL-2.0 // // Samsung Exynos Flattened Device Tree enabled machine // // Copyright (c) 2010-2014 Samsung Electronics Co., Ltd. // http://www.samsung.com #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/platform_device.h> #include <linux/irqchip.h> #include <linux/soc/samsung/exynos-regs-pmu.h> #include <asm/cacheflush.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "common.h" #define S3C_ADDR_BASE 0xF6000000 #define S3C_ADDR(x) ((void __iomem __force *)S3C_ADDR_BASE + (x)) #define S5P_VA_CHIPID S3C_ADDR(0x02000000) static struct platform_device exynos_cpuidle = { .name = "exynos_cpuidle", #ifdef CONFIG_ARM_EXYNOS_CPUIDLE .dev.platform_data = exynos_enter_aftr, #endif .id = -1, }; void __iomem *sysram_base_addr __ro_after_init; phys_addr_t sysram_base_phys __ro_after_init; void __iomem *sysram_ns_base_addr __ro_after_init; unsigned long exynos_cpu_id; static unsigned int exynos_cpu_rev; unsigned int exynos_rev(void) { return exynos_cpu_rev; } void __init exynos_sysram_init(void) { struct device_node *node; for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram") { struct resource res; if (!of_device_is_available(node)) continue; of_address_to_resource(node, 0, &res); sysram_base_addr = ioremap(res.start, resource_size(&res)); sysram_base_phys = res.start; of_node_put(node); break; } for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram-ns") { if (!of_device_is_available(node)) continue; sysram_ns_base_addr = of_iomap(node, 0); of_node_put(node); break; } } static int __init exynos_fdt_map_chipid(unsigned long node, const char *uname, int depth, void *data) { struct map_desc iodesc; const __be32 *reg; int len; if (!of_flat_dt_is_compatible(node, "samsung,exynos4210-chipid")) return 0; reg = of_get_flat_dt_prop(node, "reg", &len); if (reg == NULL || len != (sizeof(unsigned long) 
* 2)) return 0; iodesc.pfn = __phys_to_pfn(be32_to_cpu(reg[0])); iodesc.length = be32_to_cpu(reg[1]) - 1; iodesc.virtual = (unsigned long)S5P_VA_CHIPID; iodesc.type = MT_DEVICE; iotable_init(&iodesc, 1); return 1; } static void __init exynos_init_io(void) { debug_ll_io_init(); of_scan_flat_dt(exynos_fdt_map_chipid, NULL); /* detect cpu id and rev. */ exynos_cpu_id = readl_relaxed(S5P_VA_CHIPID); exynos_cpu_rev = exynos_cpu_id & 0xFF; pr_info("Samsung CPU ID: 0x%08lx\n", exynos_cpu_id); } /* * Set or clear the USE_DELAYED_RESET_ASSERTION option. Used by smp code * and suspend. * * This is necessary only on Exynos4 SoCs. When system is running * USE_DELAYED_RESET_ASSERTION should be set so the ARM CLK clock down * feature could properly detect global idle state when secondary CPU is * powered down. * * However this should not be set when such system is going into suspend. */ void exynos_set_delayed_reset_assertion(bool enable) { if (of_machine_is_compatible("samsung,exynos4")) { unsigned int tmp, core_id; for (core_id = 0; core_id < num_possible_cpus(); core_id++) { tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id)); if (enable) tmp |= S5P_USE_DELAYED_RESET_ASSERTION; else tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION); pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id)); } } } /* * Apparently, these SoCs are not able to wake-up from suspend using * the PMU. Too bad. Should they suddenly become capable of such a * feat, the matches below should be moved to suspend.c. 
 */
static const struct of_device_id exynos_dt_pmu_match[] = {
	{ .compatible = "samsung,exynos5260-pmu" },
	{ .compatible = "samsung,exynos5410-pmu" },
	{ /*sentinel*/ },
};

/*
 * Map the PMU registers for SoCs whose PMU is NOT handled by the
 * suspend.c irqchip path (see exynos_dt_pmu_match above); fills in
 * the global pmu_base_addr as a side effect.
 */
static void exynos_map_pmu(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, exynos_dt_pmu_match);
	if (np)
		pmu_base_addr = of_iomap(np, 0);

	of_node_put(np);
}

static void __init exynos_init_irq(void)
{
	irqchip_init();
	/*
	 * Since platsmp.c needs pmu base address by the time
	 * DT is not unflatten so we can't use DT APIs before
	 * init_irq
	 */
	exynos_map_pmu();
}

/*
 * Machine init: set up SYSRAM (when SMP did not already do it) and
 * register the cpuidle platform device on the SoCs/boards known to
 * support it, choosing the coupled-idle data where applicable.
 */
static void __init exynos_dt_machine_init(void)
{
	/*
	 * This is called from smp_prepare_cpus if we've built for SMP, but
	 * we still need to set it up for PM and firmware ops if not.
	 */
	if (!IS_ENABLED(CONFIG_SMP))
		exynos_sysram_init();

#if defined(CONFIG_SMP) && defined(CONFIG_ARM_EXYNOS_CPUIDLE)
	if (of_machine_is_compatible("samsung,exynos4210") ||
	    of_machine_is_compatible("samsung,exynos3250"))
		exynos_cpuidle.dev.platform_data = &cpuidle_coupled_exynos_data;
#endif
	/* Exynos4412 gets cpuidle only on the boards listed below. */
	if (of_machine_is_compatible("samsung,exynos4210") ||
	    of_machine_is_compatible("samsung,exynos4212") ||
	    (of_machine_is_compatible("samsung,exynos4412") &&
	     (of_machine_is_compatible("samsung,trats2") ||
		  of_machine_is_compatible("samsung,midas") ||
		  of_machine_is_compatible("samsung,p4note"))) ||
	    of_machine_is_compatible("samsung,exynos3250") ||
	    of_machine_is_compatible("samsung,exynos5250"))
		platform_device_register(&exynos_cpuidle);
}

static char const *const exynos_dt_compat[] __initconst = {
	"samsung,exynos3",
	"samsung,exynos3250",
	"samsung,exynos4",
	"samsung,exynos4210",
	"samsung,exynos4212",
	"samsung,exynos4412",
	"samsung,exynos5",
	"samsung,exynos5250",
	"samsung,exynos5260",
	"samsung,exynos5420",
	NULL
};

static void __init exynos_dt_fixup(void)
{
	/*
	 * Some versions of uboot pass garbage entries in the memory node,
	 * use the old CONFIG_ARM_NR_BANKS
	 */
	of_fdt_limit_memory(8);
}

DT_MACHINE_START(EXYNOS_DT, "Samsung Exynos (Flattened Device Tree)")
	.l2c_aux_val	= 0x08400000,
	.l2c_aux_mask	= 0xf60fffff,
	.smp		= smp_ops(exynos_smp_ops),
	.map_io		= exynos_init_io,
	.init_early	= exynos_firmware_init,
	.init_irq	= exynos_init_irq,
	.init_machine	= exynos_dt_machine_init,
	.init_late	= exynos_pm_init,
	.dt_compat	= exynos_dt_compat,
	.dt_fixup	= exynos_dt_fixup,
MACHINE_END
linux-master
arch/arm/mach-exynos/exynos.c
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
//		http://www.samsung.com
//
// Exynos - Power Management support
//
// Based on arch/arm/mach-s3c2410/pm.c
// Copyright (c) 2006 Simtec Electronics
//	Ben Dooks <[email protected]>

#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/cpu_pm.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include <asm/firmware.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <asm/cacheflush.h>

#include "common.h"

/*
 * Register the boot ROM reads the resume address from; the location
 * varies with SoC revision (PMU INFORM register or sysram offset).
 */
static inline void __iomem *exynos_boot_vector_addr(void)
{
	if (exynos_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM7;
	else if (exynos_rev() == EXYNOS4210_REV_1_0)
		return sysram_base_addr + 0x24;
	return pmu_base_addr + S5P_INFORM0;
}

/* Companion flag word checked by the boot ROM alongside the boot vector. */
static inline void __iomem *exynos_boot_vector_flag(void)
{
	if (exynos_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM6;
	else if (exynos_rev() == EXYNOS4210_REV_1_0)
		return sysram_base_addr + 0x20;
	return pmu_base_addr + S5P_INFORM1;
}

#define S5P_CHECK_AFTR	0xFCBA0D10

/* For Cortex-A9 Diagnostic and Power control register */
static unsigned int save_arm_register[2];

/* Save the A9 cp15 power-control and diagnostic registers before power-down. */
void exynos_cpu_save_register(void)
{
	unsigned long tmp;

	/* Save Power control register */
	asm ("mrc p15, 0, %0, c15, c0, 0"
	     : "=r" (tmp) : : "cc");

	save_arm_register[0] = tmp;

	/* Save Diagnostic register */
	asm ("mrc p15, 0, %0, c15, c0, 1"
	     : "=r" (tmp) : : "cc");

	save_arm_register[1] = tmp;
}

/* Restore the registers captured by exynos_cpu_save_register(). */
void exynos_cpu_restore_register(void)
{
	unsigned long tmp;

	/* Restore Power control register */
	tmp = save_arm_register[0];

	asm volatile ("mcr p15, 0, %0, c15, c0, 0"
		      : : "r" (tmp)
		      : "cc");

	/* Restore Diagnostic register */
	tmp = save_arm_register[1];

	asm volatile ("mcr p15, 0, %0, c15, c0, 1"
		      : : "r" (tmp)
		      : "cc");
}

void exynos_pm_central_suspend(void)
{
	unsigned long tmp;

	/* Setting Central Sequence Register for power down mode */
	tmp = pmu_raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION);
	tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
	pmu_raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
}

/* Returns 0 on normal resume, -1 if the PMU never entered low-power mode. */
int exynos_pm_central_resume(void)
{
	unsigned long tmp;

	/*
	 * If PMU failed while entering sleep mode, WFI will be
	 * ignored by PMU and then exiting cpu_do_idle().
	 * S5P_CENTRAL_LOWPWR_CFG bit will not be set automatically
	 * in this situation.
	 */
	tmp = pmu_raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION);
	if (!(tmp & S5P_CENTRAL_LOWPWR_CFG)) {
		tmp |= S5P_CENTRAL_LOWPWR_CFG;
		pmu_raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
		/* clear the wakeup state register */
		pmu_raw_writel(0x0, S5P_WAKEUP_STAT);
		/* No need to perform below restore code */
		return -1;
	}

	return 0;
}

/* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */
static void exynos_set_wakeupmask(long mask)
{
	pmu_raw_writel(mask, S5P_WAKEUP_MASK);
	if (soc_is_exynos3250())
		pmu_raw_writel(0x0, S5P_WAKEUP_MASK2);
}

/* Program the boot ROM resume vector and its check flag. */
static void exynos_cpu_set_boot_vector(long flags)
{
	writel_relaxed(__pa_symbol(exynos_cpu_resume),
		       exynos_boot_vector_addr());
	writel_relaxed(flags, exynos_boot_vector_flag());
}

/*
 * cpu_suspend() finisher for AFTR (ARM-off, top-running) mode; falls back
 * to direct register programming when firmware does not provide do_idle.
 */
static int exynos_aftr_finisher(unsigned long flags)
{
	int ret;

	exynos_set_wakeupmask(soc_is_exynos3250() ? 0x40003ffe : 0x0000ff3e);
	/* Set value of power down register for aftr mode */
	exynos_sys_powerdown_conf(SYS_AFTR);

	ret = call_firmware_op(do_idle, FW_DO_IDLE_AFTR);
	if (ret == -ENOSYS) {
		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
			exynos_cpu_save_register();
		exynos_cpu_set_boot_vector(S5P_CHECK_AFTR);
		cpu_do_idle();
	}

	return 1;
}

/* Enter AFTR mode on the current CPU and restore state on wakeup. */
void exynos_enter_aftr(void)
{
	unsigned int cpuid = smp_processor_id();

	cpu_pm_enter();

	if (soc_is_exynos3250())
		exynos_set_boot_flag(cpuid, C2_STATE);

	exynos_pm_central_suspend();

	if (soc_is_exynos4212() || soc_is_exynos4412()) {
		/* Setting SEQ_OPTION register */
		pmu_raw_writel(S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0,
			       S5P_CENTRAL_SEQ_OPTION);
	}

	cpu_suspend(0, exynos_aftr_finisher);

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
		exynos_scu_enable();
		if (call_firmware_op(resume) == -ENOSYS)
			exynos_cpu_restore_register();
	}

	exynos_pm_central_resume();

	if (soc_is_exynos3250())
		exynos_clear_boot_flag(cpuid, C2_STATE);

	cpu_pm_exit();
}

#if defined(CONFIG_SMP) && defined(CONFIG_ARM_EXYNOS_CPUIDLE)
static atomic_t cpu1_wakeup = ATOMIC_INIT(0);

/* Coupled-idle CPU0 path: wait for CPU1 to power off, enter AFTR, re-boot CPU1. */
static int exynos_cpu0_enter_aftr(void)
{
	int ret = -1;

	/*
	 * If the other cpu is powered on, we have to power it off, because
	 * the AFTR state won't work otherwise
	 */
	if (cpu_online(1)) {
		/*
		 * We reach a sync point with the coupled idle state, we know
		 * the other cpu will power down itself or will abort the
		 * sequence, let's wait for one of these to happen
		 */
		while (exynos_cpu_power_state(1)) {
			unsigned long boot_addr;

			/*
			 * The other cpu may skip idle and boot back
			 * up again
			 */
			if (atomic_read(&cpu1_wakeup))
				goto abort;

			/*
			 * The other cpu may bounce through idle and
			 * boot back up again, getting stuck in the
			 * boot rom code
			 */
			ret = exynos_get_boot_addr(1, &boot_addr);
			if (ret)
				goto fail;
			ret = -1;
			if (boot_addr == 0)
				goto abort;

			cpu_relax();
		}
	}

	exynos_enter_aftr();
	ret = 0;

abort:
	if (cpu_online(1)) {
		unsigned long boot_addr = __pa_symbol(exynos_cpu_resume);

		/*
		 * Set the boot vector to something non-zero
		 */
		ret = exynos_set_boot_addr(1, boot_addr);
		if (ret)
			goto fail;
		dsb();

		/*
		 * Turn on cpu1 and wait for it to be on
		 */
		exynos_cpu_power_up(1);
		while (exynos_cpu_power_state(1) != S5P_CORE_LOCAL_PWR_EN)
			cpu_relax();

		if (soc_is_exynos3250()) {
			while (!pmu_raw_readl(S5P_PMU_SPARE2) &&
			       !atomic_read(&cpu1_wakeup))
				cpu_relax();

			if (!atomic_read(&cpu1_wakeup))
				exynos_core_restart(1);
		}

		while (!atomic_read(&cpu1_wakeup)) {
			smp_rmb();

			/*
			 * Poke cpu1 out of the boot rom
			 */
			ret = exynos_set_boot_addr(1, boot_addr);
			if (ret)
				goto fail;

			call_firmware_op(cpu_boot, 1);
			dsb_sev();
		}
	}
fail:
	return ret;
}

/* cpu_suspend() finisher that just executes WFI; always "aborts" (-1). */
static int exynos_wfi_finisher(unsigned long flags)
{
	if (soc_is_exynos3250())
		flush_cache_all();
	cpu_do_idle();

	return -1;
}

/* Coupled-idle CPU1 path: power self down and signal CPU0 on wakeup. */
static int exynos_cpu1_powerdown(void)
{
	int ret = -1;

	/*
	 * Idle sequence for cpu1
	 */
	if (cpu_pm_enter())
		goto cpu1_aborted;

	/*
	 * Turn off cpu 1
	 */
	exynos_cpu_power_down(1);

	if (soc_is_exynos3250())
		pmu_raw_writel(0, S5P_PMU_SPARE2);

	ret = cpu_suspend(0, exynos_wfi_finisher);

	cpu_pm_exit();

cpu1_aborted:
	dsb();
	/*
	 * Notify cpu 0 that cpu 1 is awake
	 */
	atomic_set(&cpu1_wakeup, 1);

	return ret;
}

/* Point CPU1's boot address at the resume stub before entering AFTR. */
static void exynos_pre_enter_aftr(void)
{
	unsigned long boot_addr = __pa_symbol(exynos_cpu_resume);

	(void)exynos_set_boot_addr(1, boot_addr);
}

static void exynos_post_enter_aftr(void)
{
	atomic_set(&cpu1_wakeup, 0);
}

struct cpuidle_exynos_data cpuidle_coupled_exynos_data = {
	.cpu0_enter_aftr	= exynos_cpu0_enter_aftr,
	.cpu1_powerdown		= exynos_cpu1_powerdown,
	.pre_enter_aftr		= exynos_pre_enter_aftr,
	.post_enter_aftr	= exynos_post_enter_aftr,
};
#endif /* CONFIG_SMP && CONFIG_ARM_EXYNOS_CPUIDLE */
linux-master
arch/arm/mach-exynos/pm.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
//		http://www.samsung.com
//
// Cloned from linux/arch/arm/mach-vexpress/platsmp.c
//
// Copyright (C) 2002 ARM Ltd.
// All Rights Reserved

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include "common.h"

extern void exynos4_secondary_startup(void);

/* XXX exynos_pen_release is cargo culted code - DO NOT COPY XXX */
volatile int exynos_pen_release = -1;

#ifdef CONFIG_HOTPLUG_CPU
/* Re-enable the data cache and SMP coherency bit after a hotplug wakeup. */
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	asm volatile(
	"mrc p15, 0, %0, c1, c0, 0\n"
	" orr %0, %0, %1\n"
	" mcr p15, 0, %0, c1, c0, 0\n"
	" mrc p15, 0, %0, c1, c0, 1\n"
	" orr %0, %0, %2\n"
	" mcr p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

/* Spin in WFI until a proper wakeup is signalled via exynos_pen_release. */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {
		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		wfi();

		if (exynos_pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down() - power down the specified cpu
 * @cpu: the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 */
void exynos_cpu_power_down(int cpu)
{
	u32 core_conf;

	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}

	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up() - power up the specified cpu
 * @cpu: the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
		       EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state() - returns the power state of the cpu
 * @cpu: the cpu to retrieve the power state from
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
		S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down() - power down the specified cluster
 * @cluster: the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up() - power up the specified cluster
 * @cluster: the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
		       EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state() - returns the power state of the cluster
 * @cluster: the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_scu_enable() - enables SCU for Cortex-A9 based system
 */
void exynos_scu_enable(void)
{
	struct device_node *np;
	static void __iomem *scu_base;

	/* Map once and cache; DT lookup preferred, cp15 base as fallback. */
	if (!scu_base) {
		np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
		if (np) {
			scu_base = of_iomap(np, 0);
			of_node_put(np);
		} else {
			scu_base = ioremap(scu_a9_get_base(), SZ_4K);
		}
	}
	scu_enable(scu_base);
}

/* Base of the per-cpu boot address registers (sysram or PMU INFORM5). */
static void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && exynos_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return IOMEM_ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
void exynos_core_restart(u32 core_id)
{
	unsigned int timeout = 16;
	u32 val;

	if (!soc_is_exynos3250())
		return;

	while (timeout && !pmu_raw_readl(S5P_PMU_SPARE2)) {
		timeout--;
		udelay(10);
	}
	if (timeout == 0) {
		pr_err("cpu core %u restart failed\n", core_id);
		return;
	}
	udelay(10);

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * XXX CARGO CULTED CODE - DO NOT COPY XXX
 *
 * Write exynos_pen_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void exynos_write_pen_release(int val)
{
	exynos_pen_release = val;
	smp_wmb();
	sync_cache_w(&exynos_pen_release);
}

static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	exynos_write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

/* Program a core's boot address via firmware, else the boot register. */
int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
{
	int ret;

	/*
	 * Try to set boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		writel_relaxed(boot_addr, boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

/* Read back a core's boot address via firmware, else the boot register. */
int exynos_get_boot_addr(u32 core_id, unsigned long *boot_addr)
{
	int ret;

	/*
	 * Try to get boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(get_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		*boot_addr = readl_relaxed(boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting exynos_pen_release.
	 *
	 * Note that "exynos_pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	exynos_write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout == 0)
				break;
			timeout--;
			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = __pa_symbol(exynos4_secondary_startup);

		ret = exynos_set_boot_addr(core_id, boot_addr);
		if (ret)
			goto fail;

		call_firmware_op(cpu_boot, core_id);

		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (exynos_pen_release == -1)
			break;

		udelay(10);
	}

	if (exynos_pen_release != -1)
		ret = -ETIMEDOUT;

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return exynos_pen_release != -1 ? ret : 0;
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	exynos_sysram_init();

	exynos_set_delayed_reset_assertion(true);

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		exynos_scu_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

const struct smp_operations exynos_smp_ops __initconst = {
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};
linux-master
arch/arm/mach-exynos/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mach-pxa/gumstix.c
 *
 *  Support for the Gumstix motherboards.
 *
 *  Original Author:	Craig Hughes
 *  Created:	Feb 14, 2008
 *  Copyright:	Craig Hughes
 *
 *  Implemented based on lubbock.c by Nicolas Pitre and code from Craig
 *  Hughes
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/gpio/machine.h>
#include <linux/gpio.h>
#include <linux/err.h>
#include <linux/clk.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <linux/sizes.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/flash.h>

#include "pxa25x.h"
#include <linux/platform_data/mmc-pxamci.h>
#include "udc.h"
#include "gumstix.h"

#include "generic.h"

/* 64 MiB of CFI NOR flash mapped at physical address 0. */
static struct resource flash_resource = {
	.start	= 0x00000000,
	.end	= SZ_64M - 1,
	.flags	= IORESOURCE_MEM,
};

static struct mtd_partition gumstix_partitions[] = {
	{
		.name =		"Bootloader",
		.size =		0x00040000,
		.offset =	0,
		.mask_flags =	MTD_WRITEABLE  /* force read-only */
	} , {
		.name =		"rootfs",
		.size =		MTDPART_SIZ_FULL,
		.offset =	MTDPART_OFS_APPEND
	}
};

static struct flash_platform_data gumstix_flash_data = {
	.map_name	= "cfi_probe",
	.parts		= gumstix_partitions,
	.nr_parts	= ARRAY_SIZE(gumstix_partitions),
	.width		= 2,
};

static struct platform_device gumstix_flash_device = {
	.name		= "pxa2xx-flash",
	.id		= 0,
	.dev = {
		.platform_data = &gumstix_flash_data,
	},
	.resource	= &flash_resource,
	.num_resources	= 1,
};

static struct platform_device *devices[] __initdata = {
	&gumstix_flash_device,
};

#ifdef CONFIG_MMC_PXA
static struct pxamci_platform_data gumstix_mci_platform_data = {
	.ocr_mask	= MMC_VDD_32_33|MMC_VDD_33_34,
};

static void __init gumstix_mmc_init(void)
{
	pxa_set_mci_info(&gumstix_mci_platform_data);
}
#else
/* Stub when the PXA MMC driver is not configured. */
static void __init gumstix_mmc_init(void)
{
	pr_debug("Gumstix mmc disabled\n");
}
#endif

#ifdef CONFIG_USB_PXA25X
static struct gpiod_lookup_table gumstix_gpio_vbus_gpiod_table = {
	.dev_id = "gpio-vbus",
	.table = {
		GPIO_LOOKUP("gpio-pxa", GPIO_GUMSTIX_USB_GPIOn,
			    "vbus", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpio-pxa", GPIO_GUMSTIX_USB_GPIOx,
			    "pullup", GPIO_ACTIVE_HIGH),
		{ },
	},
};

static struct platform_device gumstix_gpio_vbus = {
	.name	= "gpio-vbus",
	.id	= -1,
};

static void __init gumstix_udc_init(void)
{
	gpiod_add_lookup_table(&gumstix_gpio_vbus_gpiod_table);
	platform_device_register(&gumstix_gpio_vbus);
}
#else
static void gumstix_udc_init(void)
{
	pr_debug("Gumstix udc is disabled\n");
}
#endif

#ifdef CONFIG_BT
/* Normally, the bootloader would have enabled this 32kHz clock but many
** boards still have u-boot 1.1.4 so we check if it has been turned on and
** if not, we turn it on with a warning message. */
static void gumstix_setup_bt_clock(void)
{
	int timeout = 500;

	if (!(readl(OSCC) & OSCC_OOK))
		pr_warn("32kHz clock was not on. Bootloader may need to be updated\n");
	else
		return;

	writel(readl(OSCC) | OSCC_OON, OSCC);
	do {
		if (readl(OSCC) & OSCC_OOK)
			break;
		udelay(1);
	} while (--timeout);
	if (!timeout)
		pr_err("Failed to start 32kHz clock\n");
}

/* Enable the BT 32kHz clock and pulse the reset GPIO to bring up the module. */
static void __init gumstix_bluetooth_init(void)
{
	int err;

	gumstix_setup_bt_clock();

	err = gpio_request(GPIO_GUMSTIX_BTRESET, "BTRST");
	if (err) {
		pr_err("gumstix: failed request gpio for bluetooth reset\n");
		return;
	}

	err = gpio_direction_output(GPIO_GUMSTIX_BTRESET, 1);
	if (err) {
		pr_err("gumstix: can't reset bluetooth\n");
		return;
	}
	gpio_set_value(GPIO_GUMSTIX_BTRESET, 0);
	udelay(100);
	gpio_set_value(GPIO_GUMSTIX_BTRESET, 1);
}
#else
static void gumstix_bluetooth_init(void)
{
	pr_debug("Gumstix Bluetooth is disabled\n");
}
#endif

static unsigned long gumstix_pin_config[] __initdata = {
	GPIO12_32KHz,
	/* BTUART */
	GPIO42_HWUART_RXD,
	GPIO43_HWUART_TXD,
	GPIO44_HWUART_CTS,
	GPIO45_HWUART_RTS,
	/* MMC */
	GPIO6_MMC_CLK,
	GPIO53_MMC_CLK,
	GPIO8_MMC_CS0,
};

/* Weak defaults; carrier-board support code overrides these when present. */
int __attribute__((weak)) am200_init(void)
{
	return 0;
}

int __attribute__((weak)) am300_init(void)
{
	return 0;
}

static void __init carrier_board_init(void)
{
	/*
	 * put carrier/expansion board init here if
	 * they cannot be detected programatically
	 */
	am200_init();
	am300_init();
}

static void __init gumstix_init(void)
{
	pxa2xx_mfp_config(ARRAY_AND_SIZE(gumstix_pin_config));

	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);
	pxa_set_hwuart_info(NULL);

	gumstix_bluetooth_init();
	gumstix_udc_init();
	gumstix_mmc_init();
	(void) platform_add_devices(devices, ARRAY_SIZE(devices));
	carrier_board_init();
}

MACHINE_START(GUMSTIX, "Gumstix")
	.atag_offset	= 0x100, /* match u-boot bi_boot_params */
	.map_io		= pxa25x_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa25x_init_irq,
	.init_time	= pxa_timer_init,
	.init_machine	= gumstix_init,
	.restart	= pxa_restart,
MACHINE_END
linux-master
arch/arm/mach-pxa/gumstix.c
// SPDX-License-Identifier: GPL-2.0-only /* * Battery and Power Management code for the Sharp SL-C7xx and SL-Cxx00 * series of PDAs * * Copyright (c) 2004-2005 Richard Purdie * * Based on code written by Sharp for 2.4 kernels */ #undef DEBUG #include <linux/module.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/apm-emulation.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/leds.h> #include <linux/suspend.h> #include <linux/gpio.h> #include <linux/io.h> #include <asm/mach-types.h> #include "pm.h" #include "pxa2xx-regs.h" #include "regs-rtc.h" #include "sharpsl_pm.h" /* * Constants */ #define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */ #define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */ #define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */ #define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */ #define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */ #define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */ #define SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP 10 /* 10 msec */ #define SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT 10 /* 10 msec */ #define SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN 10 /* 10 msec */ #define SHARPSL_CHARGE_WAIT_TIME 15 /* 15 msec */ #define SHARPSL_CHARGE_CO_CHECK_TIME 5 /* 5 msec */ #define SHARPSL_CHARGE_RETRY_CNT 1 /* eqv. 
10 min */ /* * Prototypes */ #ifdef CONFIG_PM static int sharpsl_off_charge_battery(void); static int sharpsl_check_battery_voltage(void); #endif static int sharpsl_check_battery_temp(void); static int sharpsl_ac_check(void); static int sharpsl_average_value(int ad); static void sharpsl_average_clear(void); static void sharpsl_charge_toggle(struct work_struct *private_); static void sharpsl_battery_thread(struct work_struct *private_); /* * Variables */ struct sharpsl_pm_status sharpsl_pm; static DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle); static DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread); DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger); struct battery_thresh sharpsl_battery_levels_acin[] = { { 213, 100}, { 212, 98}, { 211, 95}, { 210, 93}, { 209, 90}, { 208, 88}, { 207, 85}, { 206, 83}, { 205, 80}, { 204, 78}, { 203, 75}, { 202, 73}, { 201, 70}, { 200, 68}, { 199, 65}, { 198, 63}, { 197, 60}, { 196, 58}, { 195, 55}, { 194, 53}, { 193, 50}, { 192, 48}, { 192, 45}, { 191, 43}, { 191, 40}, { 190, 38}, { 190, 35}, { 189, 33}, { 188, 30}, { 187, 28}, { 186, 25}, { 185, 23}, { 184, 20}, { 183, 18}, { 182, 15}, { 181, 13}, { 180, 10}, { 179, 8}, { 178, 5}, { 0, 0}, }; struct battery_thresh sharpsl_battery_levels_noac[] = { { 213, 100}, { 212, 98}, { 211, 95}, { 210, 93}, { 209, 90}, { 208, 88}, { 207, 85}, { 206, 83}, { 205, 80}, { 204, 78}, { 203, 75}, { 202, 73}, { 201, 70}, { 200, 68}, { 199, 65}, { 198, 63}, { 197, 60}, { 196, 58}, { 195, 55}, { 194, 53}, { 193, 50}, { 192, 48}, { 191, 45}, { 190, 43}, { 189, 40}, { 188, 38}, { 187, 35}, { 186, 33}, { 185, 30}, { 184, 28}, { 183, 25}, { 182, 23}, { 181, 20}, { 180, 18}, { 179, 15}, { 178, 13}, { 177, 10}, { 176, 8}, { 175, 5}, { 0, 0}, }; /* MAX1111 Commands */ #define MAXCTRL_PD0 (1u << 0) #define MAXCTRL_PD1 (1u << 1) #define MAXCTRL_SGL (1u << 2) #define MAXCTRL_UNI (1u << 3) #define MAXCTRL_SEL_SH 4 #define MAXCTRL_STR (1u << 7) extern int max1111_read_channel(int); /* * Read 
MAX1111 ADC */ int sharpsl_pm_pxa_read_max1111(int channel) { /* max1111 accepts channels from 0-3, however, * it is encoded from 0-7 here in the code. */ return max1111_read_channel(channel >> 1); } static int get_percentage(int voltage) { int i = sharpsl_pm.machinfo->bat_levels - 1; int bl_status = sharpsl_pm.machinfo->backlight_get_status ? sharpsl_pm.machinfo->backlight_get_status() : 0; struct battery_thresh *thresh; if (sharpsl_pm.charge_mode == CHRG_ON) thresh = bl_status ? sharpsl_pm.machinfo->bat_levels_acin_bl : sharpsl_pm.machinfo->bat_levels_acin; else thresh = bl_status ? sharpsl_pm.machinfo->bat_levels_noac_bl : sharpsl_pm.machinfo->bat_levels_noac; while (i > 0 && (voltage > thresh[i].voltage)) i--; return thresh[i].percentage; } static int get_apm_status(int voltage) { int low_thresh, high_thresh; if (sharpsl_pm.charge_mode == CHRG_ON) { high_thresh = sharpsl_pm.machinfo->status_high_acin; low_thresh = sharpsl_pm.machinfo->status_low_acin; } else { high_thresh = sharpsl_pm.machinfo->status_high_noac; low_thresh = sharpsl_pm.machinfo->status_low_noac; } if (voltage >= high_thresh) return APM_BATTERY_STATUS_HIGH; if (voltage >= low_thresh) return APM_BATTERY_STATUS_LOW; return APM_BATTERY_STATUS_CRITICAL; } void sharpsl_battery_kick(void) { schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125)); } static void sharpsl_battery_thread(struct work_struct *private_) { int voltage, percent, apm_status, i; if (!sharpsl_pm.machinfo) return; sharpsl_pm.battstat.ac_status = (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN) ? APM_AC_ONLINE : APM_AC_OFFLINE); /* Corgi cannot confirm when battery fully charged so periodically kick! 
*/ if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON) && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL)) schedule_delayed_work(&toggle_charger, 0); for (i = 0; i < 5; i++) { voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT); if (voltage > 0) break; } if (voltage <= 0) { voltage = sharpsl_pm.machinfo->bat_levels_noac[0].voltage; dev_warn(sharpsl_pm.dev, "Warning: Cannot read main battery!\n"); } voltage = sharpsl_average_value(voltage); apm_status = get_apm_status(voltage); percent = get_percentage(voltage); /* At low battery voltages, the voltage has a tendency to start creeping back up so we try to avoid this here */ if ((sharpsl_pm.battstat.ac_status == APM_AC_ONLINE) || (apm_status == APM_BATTERY_STATUS_HIGH) || percent <= sharpsl_pm.battstat.mainbat_percent) { sharpsl_pm.battstat.mainbat_voltage = voltage; sharpsl_pm.battstat.mainbat_status = apm_status; sharpsl_pm.battstat.mainbat_percent = percent; } dev_dbg(sharpsl_pm.dev, "Battery: voltage: %d, status: %d, percentage: %d, time: %ld\n", voltage, sharpsl_pm.battstat.mainbat_status, sharpsl_pm.battstat.mainbat_percent, jiffies); /* Suspend if critical battery level */ if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE) && (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL) && !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) { sharpsl_pm.flags |= SHARPSL_APM_QUEUED; dev_err(sharpsl_pm.dev, "Fatal Off\n"); apm_queue_event(APM_CRITICAL_SUSPEND); } schedule_delayed_work(&sharpsl_bat, SHARPSL_BATCHK_TIME); } void sharpsl_pm_led(int val) { if (val == SHARPSL_LED_ERROR) { dev_err(sharpsl_pm.dev, "Charging Error!\n"); } else if (val == SHARPSL_LED_ON) { dev_dbg(sharpsl_pm.dev, "Charge LED On\n"); led_trigger_event(sharpsl_charge_led_trigger, LED_FULL); } else { dev_dbg(sharpsl_pm.dev, "Charge LED Off\n"); led_trigger_event(sharpsl_charge_led_trigger, LED_OFF); } } static void sharpsl_charge_on(void) { dev_dbg(sharpsl_pm.dev, "Turning 
Charger On\n"); sharpsl_pm.full_count = 0; sharpsl_pm.charge_mode = CHRG_ON; schedule_delayed_work(&toggle_charger, msecs_to_jiffies(250)); schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(500)); } static void sharpsl_charge_off(void) { dev_dbg(sharpsl_pm.dev, "Turning Charger Off\n"); sharpsl_pm.machinfo->charge(0); sharpsl_pm_led(SHARPSL_LED_OFF); sharpsl_pm.charge_mode = CHRG_OFF; schedule_delayed_work(&sharpsl_bat, 0); } static void sharpsl_charge_error(void) { sharpsl_pm_led(SHARPSL_LED_ERROR); sharpsl_pm.machinfo->charge(0); sharpsl_pm.charge_mode = CHRG_ERROR; } static void sharpsl_charge_toggle(struct work_struct *private_) { dev_dbg(sharpsl_pm.dev, "Toggling Charger at time: %lx\n", jiffies); if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) { sharpsl_charge_off(); return; } else if ((sharpsl_check_battery_temp() < 0) || (sharpsl_ac_check() < 0)) { sharpsl_charge_error(); return; } sharpsl_pm_led(SHARPSL_LED_ON); sharpsl_pm.machinfo->charge(0); mdelay(SHARPSL_CHARGE_WAIT_TIME); sharpsl_pm.machinfo->charge(1); sharpsl_pm.charge_start_time = jiffies; } static void sharpsl_ac_timer(struct timer_list *unused) { int acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN); dev_dbg(sharpsl_pm.dev, "AC Status: %d\n", acin); sharpsl_average_clear(); if (acin && (sharpsl_pm.charge_mode != CHRG_ON)) sharpsl_charge_on(); else if (sharpsl_pm.charge_mode == CHRG_ON) sharpsl_charge_off(); schedule_delayed_work(&sharpsl_bat, 0); } static irqreturn_t sharpsl_ac_isr(int irq, void *dev_id) { /* Delay the event slightly to debounce */ /* Must be a smaller delay than the chrg_full_isr below */ mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250)); return IRQ_HANDLED; } static void sharpsl_chrg_full_timer(struct timer_list *unused) { dev_dbg(sharpsl_pm.dev, "Charge Full at time: %lx\n", jiffies); sharpsl_pm.full_count++; if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) { dev_dbg(sharpsl_pm.dev, "Charge Full: AC removed - stop 
charging!\n"); if (sharpsl_pm.charge_mode == CHRG_ON) sharpsl_charge_off(); } else if (sharpsl_pm.full_count < 2) { dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n"); schedule_delayed_work(&toggle_charger, 0); } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) { dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n"); schedule_delayed_work(&toggle_charger, 0); } else { sharpsl_charge_off(); sharpsl_pm.charge_mode = CHRG_DONE; dev_dbg(sharpsl_pm.dev, "Charge Full: Charging Finished\n"); } } /* Charging Finished Interrupt (Not present on Corgi) */ /* Can trigger at the same time as an AC status change so delay until after that has been processed */ static irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id) { if (sharpsl_pm.flags & SHARPSL_SUSPENDED) return IRQ_HANDLED; /* delay until after any ac interrupt */ mod_timer(&sharpsl_pm.chrg_full_timer, jiffies + msecs_to_jiffies(500)); return IRQ_HANDLED; } static irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id) { int is_fatal = 0; if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_LOCK)) { dev_err(sharpsl_pm.dev, "Battery now Unlocked! Suspending.\n"); is_fatal = 1; } if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_FATAL)) { dev_err(sharpsl_pm.dev, "Fatal Batt Error! 
Suspending.\n"); is_fatal = 1; } if (!(sharpsl_pm.flags & SHARPSL_APM_QUEUED) && is_fatal) { sharpsl_pm.flags |= SHARPSL_APM_QUEUED; apm_queue_event(APM_CRITICAL_SUSPEND); } return IRQ_HANDLED; } /* * Maintain an average of the last 10 readings */ #define SHARPSL_CNV_VALUE_NUM 10 static int sharpsl_ad_index; static void sharpsl_average_clear(void) { sharpsl_ad_index = 0; } static int sharpsl_average_value(int ad) { int i, ad_val = 0; static int sharpsl_ad[SHARPSL_CNV_VALUE_NUM+1]; if (sharpsl_pm.battstat.mainbat_status != APM_BATTERY_STATUS_HIGH) { sharpsl_ad_index = 0; return ad; } sharpsl_ad[sharpsl_ad_index] = ad; sharpsl_ad_index++; if (sharpsl_ad_index >= SHARPSL_CNV_VALUE_NUM) { for (i = 0; i < (SHARPSL_CNV_VALUE_NUM-1); i++) sharpsl_ad[i] = sharpsl_ad[i+1]; sharpsl_ad_index = SHARPSL_CNV_VALUE_NUM - 1; } for (i = 0; i < sharpsl_ad_index; i++) ad_val += sharpsl_ad[i]; return ad_val / sharpsl_ad_index; } /* * Take an array of 5 integers, remove the maximum and minimum values * and return the average. 
 */
static int get_select_val(int *val)
{
        int i, j, k, temp, sum = 0;

        /* Find MAX val */
        temp = val[0];
        j = 0;
        for (i = 1; i < 5; i++) {
                if (temp < val[i]) {
                        temp = val[i];
                        j = i;
                }
        }

        /* Find MIN val */
        temp = val[4];
        k = 4;
        for (i = 3; i >= 0; i--) {
                if (temp > val[i]) {
                        temp = val[i];
                        k = i;
                }
        }

        /* Sum the three surviving samples (skip max at index j, min at index k). */
        for (i = 0; i < 5; i++)
                if (i != j && i != k)
                        sum += val[i];

        dev_dbg(sharpsl_pm.dev, "Average: %d from values: %d, %d, %d, %d, %d\n", sum/3, val[0], val[1], val[2], val[3], val[4]);

        return sum/3;
}

/*
 * Sample the battery thermistor five times and reject outliers.
 * Returns 0 if the temperature permits charging, -1 otherwise.
 */
static int sharpsl_check_battery_temp(void)
{
        int val, i, buff[5];

        /* Check battery temperature */
        for (i = 0; i < 5; i++) {
                mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
                sharpsl_pm.machinfo->measure_temp(1);
                mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
                buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_TEMP);
                sharpsl_pm.machinfo->measure_temp(0);
        }

        val = get_select_val(buff);

        dev_dbg(sharpsl_pm.dev, "Temperature: %d\n", val);
        if (val > sharpsl_pm.machinfo->charge_on_temp) {
                printk(KERN_WARNING "Not charging: temperature out of limits.\n");
                return -1;
        }

        return 0;
}

#ifdef CONFIG_PM
/*
 * Measure battery voltage under a discharge load.
 * Returns 0 if the voltage is high enough to allow charging, -1 otherwise.
 */
static int sharpsl_check_battery_voltage(void)
{
        int val, i, buff[5];

        /* disable charge, enable discharge */
        sharpsl_pm.machinfo->charge(0);
        sharpsl_pm.machinfo->discharge(1);
        mdelay(SHARPSL_WAIT_DISCHARGE_ON);

        /* discharge1 is an optional second discharge path on some machines */
        if (sharpsl_pm.machinfo->discharge1)
                sharpsl_pm.machinfo->discharge1(1);

        /* Check battery voltage */
        for (i = 0; i < 5; i++) {
                buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
                mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
        }

        if (sharpsl_pm.machinfo->discharge1)
                sharpsl_pm.machinfo->discharge1(0);
        sharpsl_pm.machinfo->discharge(0);

        val = get_select_val(buff);
        dev_dbg(sharpsl_pm.dev, "Battery Voltage: %d\n", val);

        if (val < sharpsl_pm.machinfo->charge_on_volt)
                return -1;

        return 0;
}
#endif

/*
 * Verify the AC adapter voltage is within the machine's accepted window.
 * Returns 0 on success, -1 if the reading is out of range.
 */
static int sharpsl_ac_check(void)
{
        int temp, i, buff[5];

        for (i = 0; i < 5; i++) {
                buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_ACIN_VOLT);
                mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN);
        }

        temp = get_select_val(buff);
        dev_dbg(sharpsl_pm.dev, "AC Voltage: %d\n", temp);

        if ((temp > sharpsl_pm.machinfo->charge_acin_high) || (temp < sharpsl_pm.machinfo->charge_acin_low)) {
                dev_err(sharpsl_pm.dev, "Error: AC check failed: voltage %d.\n", temp);
                return -1;
        }

        return 0;
}

#ifdef CONFIG_PM
/*
 * Platform suspend hook: flush pending charger work and remember whether
 * we should keep charging ("offline charging") while asleep.
 */
static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
{
        sharpsl_pm.flags |= SHARPSL_SUSPENDED;
        flush_delayed_work(&toggle_charger);
        flush_delayed_work(&sharpsl_bat);

        if (sharpsl_pm.charge_mode == CHRG_ON)
                sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
        else
                sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;

        return 0;
}

/* Platform resume hook: reset state tracked across the suspend cycle. */
static int sharpsl_pm_resume(struct platform_device *pdev)
{
        /* Clear the reset source indicators as they break the bootloader upon reboot */
        RCSR = 0x0f;
        sharpsl_average_clear();
        sharpsl_pm.flags &= ~SHARPSL_APM_QUEUED;
        sharpsl_pm.flags &= ~SHARPSL_SUSPENDED;

        return 0;
}

/*
 * Actually enter suspend: optionally kick off offline charging, program the
 * RTC alarm (either a periodic battery-check alarm or the user's alarm),
 * then call into the PXA suspend path.
 */
static void corgi_goto_sleep(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
{
        dev_dbg(sharpsl_pm.dev, "Time is: %08x\n", RCNR);

        dev_dbg(sharpsl_pm.dev, "Offline Charge Activate = %d\n", sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG);
        /* not charging and AC-IN! */
        if ((sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG) && (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN))) {
                dev_dbg(sharpsl_pm.dev, "Activating Offline Charger...\n");
                sharpsl_pm.charge_mode = CHRG_OFF;
                sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
                sharpsl_off_charge_battery();
        }

        sharpsl_pm.machinfo->presuspend();

        PEDR = 0xffffffff; /* clear it */

        sharpsl_pm.flags &= ~SHARPSL_ALARM_ACTIVE;
        /*
         * While charging, wake up periodically to re-check the battery —
         * unless the user's own alarm would fire first anyway.
         */
        if ((sharpsl_pm.charge_mode == CHRG_ON) && ((alarm_enable && ((alarm_time - RCNR) > (SHARPSL_BATCHK_TIME_SUSPEND + 30))) || !alarm_enable)) {
                RTSR &= RTSR_ALE;
                RTAR = RCNR + SHARPSL_BATCHK_TIME_SUSPEND;
                dev_dbg(sharpsl_pm.dev, "Charging alarm at: %08x\n", RTAR);
                sharpsl_pm.flags |= SHARPSL_ALARM_ACTIVE;
        } else if (alarm_enable) {
                RTSR &= RTSR_ALE;
                RTAR = alarm_time;
                dev_dbg(sharpsl_pm.dev, "User alarm at: %08x\n", RTAR);
        } else {
                dev_dbg(sharpsl_pm.dev, "No alarms set.\n");
        }

        pxa_pm_enter(state);

        sharpsl_pm.machinfo->postsuspend();

        dev_dbg(sharpsl_pm.dev, "Corgi woken up from suspend: %08x\n", PEDR);
}

/*
 * Decide whether a wakeup should put the machine straight back to sleep.
 * Returns 1 to re-suspend (charging alarm / spurious / fatal), 0 to resume.
 */
static int corgi_enter_suspend(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
{
        if (!sharpsl_pm.machinfo->should_wakeup(!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE) && alarm_enable)) {
                if (!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE)) {
                        dev_dbg(sharpsl_pm.dev, "No user triggered wakeup events and not charging. Strange. Suspend.\n");
                        corgi_goto_sleep(alarm_time, alarm_enable, state);
                        return 1;
                }
                if (sharpsl_off_charge_battery()) {
                        dev_dbg(sharpsl_pm.dev, "Charging. Suspend...\n");
                        corgi_goto_sleep(alarm_time, alarm_enable, state);
                        return 1;
                }
                dev_dbg(sharpsl_pm.dev, "User triggered wakeup in offline charger.\n");
        }

        /* Battery unlocked or fatal condition: do not stay awake. */
        if ((!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_LOCK)) || (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_FATAL))) {
                dev_err(sharpsl_pm.dev, "Fatal condition. Suspend.\n");
                corgi_goto_sleep(alarm_time, alarm_enable, state);
                return 1;
        }

        return 0;
}

/*
 * platform_suspend_ops.enter: loop in suspend until a wakeup that really
 * warrants resuming, preserving the user's RTC alarm across iterations.
 */
static int corgi_pxa_pm_enter(suspend_state_t state)
{
        unsigned long alarm_time = RTAR;
        unsigned int alarm_status = ((RTSR & RTSR_ALE) != 0);

        dev_dbg(sharpsl_pm.dev, "SharpSL suspending for first time.\n");

        corgi_goto_sleep(alarm_time, alarm_status, state);

        while (corgi_enter_suspend(alarm_time, alarm_status, state))
                {}

        if (sharpsl_pm.machinfo->earlyresume)
                sharpsl_pm.machinfo->earlyresume();

        dev_dbg(sharpsl_pm.dev, "SharpSL resuming...\n");

        return 0;
}

/* Abort offline charging: turn the charger off and flag the error. Always returns 1. */
static int sharpsl_off_charge_error(void)
{
        dev_err(sharpsl_pm.dev, "Offline Charger: Error occurred.\n");
        sharpsl_pm.machinfo->charge(0);
        sharpsl_pm_led(SHARPSL_LED_ERROR);
        sharpsl_pm.charge_mode = CHRG_ERROR;
        return 1;
}

/*
 * Charging Control while suspended
 * Return 1 - go straight to sleep
 * Return 0 - sleep or wakeup depending on other factors
 */
static int sharpsl_off_charge_battery(void)
{
        int time;

        dev_dbg(sharpsl_pm.dev, "Charge Mode: %d\n", sharpsl_pm.charge_mode);

        if (sharpsl_pm.charge_mode == CHRG_OFF) {
                dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 1\n");

                /* AC Check */
                if ((sharpsl_ac_check() < 0) || (sharpsl_check_battery_temp() < 0))
                        return sharpsl_off_charge_error();

                /* Start Charging */
                sharpsl_pm_led(SHARPSL_LED_ON);
                sharpsl_pm.machinfo->charge(0);
                mdelay(SHARPSL_CHARGE_WAIT_TIME);
                sharpsl_pm.machinfo->charge(1);

                sharpsl_pm.charge_mode = CHRG_ON;
                sharpsl_pm.full_count = 0;

                return 1;
        } else if (sharpsl_pm.charge_mode != CHRG_ON) {
                return 1;
        }

        if (sharpsl_pm.full_count == 0) {
                /* NOTE(review): this inner 'time' shadows the outer one on purpose;
                 * each step uses its own RCNR snapshot. */
                int time;

                dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 2\n");

                if ((sharpsl_check_battery_temp() < 0) || (sharpsl_check_battery_voltage() < 0))
                        return sharpsl_off_charge_error();

                /* Restart charging after the load test above. */
                sharpsl_pm.machinfo->charge(0);
                mdelay(SHARPSL_CHARGE_WAIT_TIME);
                sharpsl_pm.machinfo->charge(1);
                sharpsl_pm.charge_mode = CHRG_ON;

                mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);

                time = RCNR;
                while (1) {
                        /* Check if any wakeup event had occurred */
                        if (sharpsl_pm.machinfo->charger_wakeup())
                                return 0;
                        /* Check for timeout */
                        if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
                                return 1;
                        if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_CHRGFULL)) {
                                dev_dbg(sharpsl_pm.dev, "Offline Charger: Charge full occurred. Retrying to check\n");
                                sharpsl_pm.full_count++;
                                sharpsl_pm.machinfo->charge(0);
                                mdelay(SHARPSL_CHARGE_WAIT_TIME);
                                sharpsl_pm.machinfo->charge(1);
                                return 1;
                        }
                }
        }

        dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 3\n");

        mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);

        time = RCNR;
        while (1) {
                /* Check if any wakeup event had occurred */
                if (sharpsl_pm.machinfo->charger_wakeup())
                        return 0;
                /* Check for timeout */
                if ((RCNR-time) > SHARPSL_WAIT_CO_TIME) {
                        if (sharpsl_pm.full_count > SHARPSL_CHARGE_RETRY_CNT) {
                                dev_dbg(sharpsl_pm.dev, "Offline Charger: Not charged sufficiently. Retrying.\n");
                                sharpsl_pm.full_count = 0;
                        }
                        sharpsl_pm.full_count++;
                        return 1;
                }
                if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_CHRGFULL)) {
                        dev_dbg(sharpsl_pm.dev, "Offline Charger: Charging complete.\n");
                        sharpsl_pm_led(SHARPSL_LED_OFF);
                        sharpsl_pm.machinfo->charge(0);
                        sharpsl_pm.charge_mode = CHRG_DONE;
                        return 1;
                }
        }
}
#else
#define sharpsl_pm_suspend	NULL
#define sharpsl_pm_resume	NULL
#endif

/* sysfs: report the averaged main-battery charge percentage. */
static ssize_t battery_percentage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", sharpsl_pm.battstat.mainbat_percent);
}

/* sysfs: report the last measured main-battery voltage. */
static ssize_t battery_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", sharpsl_pm.battstat.mainbat_voltage);
}

static DEVICE_ATTR_RO(battery_percentage);
static DEVICE_ATTR_RO(battery_voltage);

extern void (*apm_get_power_status)(struct apm_power_info *);

/* APM emulation callback: translate our battery state into apm_power_info. */
static void sharpsl_apm_get_power_status(struct apm_power_info *info)
{
        info->ac_line_status = sharpsl_pm.battstat.ac_status;

        if (sharpsl_pm.charge_mode == CHRG_ON)
                info->battery_status = APM_BATTERY_STATUS_CHARGING;
        else
                info->battery_status = sharpsl_pm.battstat.mainbat_status;

        info->battery_flag = (1 << info->battery_status);
        info->battery_life = sharpsl_pm.battstat.mainbat_percent;
}

#ifdef CONFIG_PM
static const struct platform_suspend_ops sharpsl_pm_ops = {
        .prepare	= pxa_pm_prepare,
        .finish		= pxa_pm_finish,
        .enter		= corgi_pxa_pm_enter,
        .valid		= suspend_valid_only_mem,
};
#endif

/*
 * Bind to the board's sharpsl-pm platform device: set up timers, the charge
 * LED trigger, GPIOs and interrupts, sysfs attributes, APM and suspend ops.
 * NOTE(review): gpio_request()/request_irq() results are only logged, not
 * propagated; the ISR function pointer doubles as the dev_id cookie so the
 * matching free_irq() in remove can find it.
 */
static int sharpsl_pm_probe(struct platform_device *pdev)
{
        int ret, irq;

        if (!pdev->dev.platform_data)
                return -EINVAL;

        sharpsl_pm.dev = &pdev->dev;
        sharpsl_pm.machinfo = pdev->dev.platform_data;
        sharpsl_pm.charge_mode = CHRG_OFF;
        sharpsl_pm.flags = 0;

        timer_setup(&sharpsl_pm.ac_timer, sharpsl_ac_timer, 0);

        timer_setup(&sharpsl_pm.chrg_full_timer, sharpsl_chrg_full_timer, 0);

        led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger);

        sharpsl_pm.machinfo->init();

        gpio_request(sharpsl_pm.machinfo->gpio_acin, "AC IN");
        gpio_direction_input(sharpsl_pm.machinfo->gpio_acin);
        gpio_request(sharpsl_pm.machinfo->gpio_batfull, "Battery Full");
        gpio_direction_input(sharpsl_pm.machinfo->gpio_batfull);
        gpio_request(sharpsl_pm.machinfo->gpio_batlock, "Battery Lock");
        gpio_direction_input(sharpsl_pm.machinfo->gpio_batlock);

        /* Register interrupt handlers */
        irq = gpio_to_irq(sharpsl_pm.machinfo->gpio_acin);
        if (request_irq(irq, sharpsl_ac_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "AC Input Detect", sharpsl_ac_isr)) {
                dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", irq);
        }

        irq = gpio_to_irq(sharpsl_pm.machinfo->gpio_batlock);
        if (request_irq(irq, sharpsl_fatal_isr, IRQF_TRIGGER_FALLING, "Battery Cover", sharpsl_fatal_isr)) {
                dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", irq);
        }

        if (sharpsl_pm.machinfo->gpio_fatal) {
                irq = gpio_to_irq(sharpsl_pm.machinfo->gpio_fatal);
                if (request_irq(irq, sharpsl_fatal_isr, IRQF_TRIGGER_FALLING, "Fatal Battery", sharpsl_fatal_isr)) {
                        dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", irq);
                }
        }

        if (sharpsl_pm.machinfo->batfull_irq) {
                /* Register interrupt handler. */
                irq = gpio_to_irq(sharpsl_pm.machinfo->gpio_batfull);
                if (request_irq(irq, sharpsl_chrg_full_isr, IRQF_TRIGGER_RISING, "CO", sharpsl_chrg_full_isr)) {
                        dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", irq);
                }
        }

        ret = device_create_file(&pdev->dev, &dev_attr_battery_percentage);
        ret |= device_create_file(&pdev->dev, &dev_attr_battery_voltage);
        if (ret != 0)
                dev_warn(&pdev->dev, "Failed to register attributes (%d)\n", ret);

        apm_get_power_status = sharpsl_apm_get_power_status;

#ifdef CONFIG_PM
        suspend_set_ops(&sharpsl_pm_ops);
#endif

        /* Kick off the first battery/AC poll shortly after probe. */
        mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));

        return 0;
}

/* Undo everything probe set up, in reverse order. */
static void sharpsl_pm_remove(struct platform_device *pdev)
{
        suspend_set_ops(NULL);

        device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
        device_remove_file(&pdev->dev, &dev_attr_battery_voltage);

        led_trigger_unregister_simple(sharpsl_charge_led_trigger);

        free_irq(gpio_to_irq(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr);
        free_irq(gpio_to_irq(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr);

        if (sharpsl_pm.machinfo->gpio_fatal)
                free_irq(gpio_to_irq(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr);

        if (sharpsl_pm.machinfo->batfull_irq)
                free_irq(gpio_to_irq(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr);

        gpio_free(sharpsl_pm.machinfo->gpio_batlock);
        gpio_free(sharpsl_pm.machinfo->gpio_batfull);
        gpio_free(sharpsl_pm.machinfo->gpio_acin);

        if (sharpsl_pm.machinfo->exit)
                sharpsl_pm.machinfo->exit();

        del_timer_sync(&sharpsl_pm.chrg_full_timer);
        del_timer_sync(&sharpsl_pm.ac_timer);
}

static struct platform_driver sharpsl_pm_driver = {
        .probe		= sharpsl_pm_probe,
        .remove_new	= sharpsl_pm_remove,
        .suspend	= sharpsl_pm_suspend,
        .resume		= sharpsl_pm_resume,
        .driver		= {
                .name		= "sharpsl-pm",
        },
};

static int sharpsl_pm_init(void)
{
        return platform_driver_register(&sharpsl_pm_driver);
}

static void sharpsl_pm_exit(void)
{
        platform_driver_unregister(&sharpsl_pm_driver);
}

late_initcall(sharpsl_pm_init);
module_exit(sharpsl_pm_exit);
linux-master
arch/arm/mach-pxa/sharpsl_pm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/irq.c
 *
 * Generic PXA IRQ handling
 *
 * Author:	Nicolas Pitre
 * Created:	Jun 15, 2001
 * Copyright:	MontaVista Software Inc.
 */
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/soc/pxa/cpu.h>

#include <asm/exception.h>

#include "irqs.h"
#include "generic.h"
#include "pxa-regs.h"

/* Interrupt-controller register offsets from the controller base. */
#define ICIP		(0x000)
#define ICMR		(0x004)
#define ICLR		(0x008)
#define ICFR		(0x00c)
#define ICPR		(0x010)
#define ICCR		(0x014)
#define ICHP		(0x018)
/* Per-IRQ priority registers live in three discontiguous banks. */
#define IPR(i)		(((i) < 32) ? (0x01c + ((i) << 2)) : ((i) < 64) ? (0x0b0 + (((i) - 32) << 2)) : (0x144 + (((i) - 64) << 2)))
#define ICHP_VAL_IRQ	(1 << 31)
#define ICHP_IRQ(i)	(((i) >> 16) & 0x7fff)
#define IPR_VALID	(1 << 31)

#define MAX_INTERNAL_IRQS 128

/*
 * This is for peripheral IRQs internal to the PXA chip.
 */

static void __iomem *pxa_irq_base;
static int pxa_internal_irq_nr;
static bool cpu_has_ipr;
static struct irq_domain *pxa_irq_domain;

/* Map a 32-IRQ bank index to the base of its register set. */
static inline void __iomem *irq_base(int i)
{
        static unsigned long phys_base_offset[] = {
                0x0,
                0x9c,
                0x130,
        };

        return pxa_irq_base + phys_base_offset[i];
}

/* Mask an IRQ by clearing its bit in the bank's ICMR. */
void pxa_mask_irq(struct irq_data *d)
{
        void __iomem *base = irq_data_get_irq_chip_data(d);
        irq_hw_number_t irq = irqd_to_hwirq(d);
        uint32_t icmr = __raw_readl(base + ICMR);

        icmr &= ~BIT(irq & 0x1f);
        __raw_writel(icmr, base + ICMR);
}

/* Unmask an IRQ by setting its bit in the bank's ICMR. */
void pxa_unmask_irq(struct irq_data *d)
{
        void __iomem *base = irq_data_get_irq_chip_data(d);
        irq_hw_number_t irq = irqd_to_hwirq(d);
        uint32_t icmr = __raw_readl(base + ICMR);

        icmr |= BIT(irq & 0x1f);
        __raw_writel(icmr, base + ICMR);
}

static struct irq_chip pxa_internal_irq_chip = {
        .name		= "SC",
        .irq_ack	= pxa_mask_irq,
        .irq_mask	= pxa_mask_irq,
        .irq_unmask	= pxa_unmask_irq,
};

/*
 * IRQ entry for chips without the ICHP "highest priority" register:
 * scan ICIP & ICMR and dispatch the highest set bit until none remain.
 */
asmlinkage void __exception_irq_entry icip_handle_irq(struct pt_regs *regs)
{
        uint32_t icip, icmr, mask;

        do {
                icip = __raw_readl(pxa_irq_base + ICIP);
                icmr = __raw_readl(pxa_irq_base + ICMR);
                mask = icip & icmr;

                if (mask == 0)
                        break;

                handle_IRQ(PXA_IRQ(fls(mask) - 1), regs);
        } while (1);
}

/*
 * IRQ entry using the ICHP coprocessor register (cp6), which reports the
 * highest-priority pending interrupt directly.
 */
asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
{
        uint32_t ichp;

        do {
                __asm__ __volatile__("mrc p6, 0, %0, c5, c0, 0\n": "=r"(ichp));

                if ((ichp & ICHP_VAL_IRQ) == 0)
                        break;

                handle_IRQ(PXA_IRQ(ICHP_IRQ(ichp)), regs);
        } while (1);
}

/* irq_domain map callback: set priority (if supported), chip and handler. */
static int pxa_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw)
{
        void __iomem *base = irq_base(hw / 32);

        /* initialize interrupt priority */
        if (cpu_has_ipr)
                __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));

        irq_set_chip_and_handler(virq, &pxa_internal_irq_chip, handle_level_irq);
        irq_set_chip_data(virq, base);

        return 0;
}

static const struct irq_domain_ops pxa_irq_ops = {
        .map    = pxa_irq_map,
        .xlate  = irq_domain_xlate_onecell,
};

/*
 * Shared init: create the legacy IRQ domain, mask everything, route all
 * interrupts as IRQ (not FIQ), and install the optional set_wake hook.
 */
static __init void pxa_init_irq_common(struct device_node *node, int irq_nr, int (*fn)(struct irq_data *, unsigned int))
{
        int n;

        pxa_internal_irq_nr = irq_nr;
        pxa_irq_domain = irq_domain_add_legacy(node, irq_nr, PXA_IRQ(0), 0, &pxa_irq_ops, NULL);
        if (!pxa_irq_domain)
                panic("Unable to add PXA IRQ domain\n");
        irq_set_default_host(pxa_irq_domain);

        for (n = 0; n < irq_nr; n += 32) {
                void __iomem *base = irq_base(n >> 5);

                __raw_writel(0, base + ICMR);	/* disable all IRQs */
                __raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
        }
        /* only unmasked interrupts kick us out of idle */
        __raw_writel(1, irq_base(0) + ICCR);

        pxa_internal_irq_chip.irq_set_wake = fn;
}

/* Non-DT entry point: controller at the fixed PXA physical address. */
void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
{
        BUG_ON(irq_nr > MAX_INTERNAL_IRQS);

        pxa_irq_base = io_p2v(0x40d00000);
        cpu_has_ipr = !cpu_is_pxa25x();
        pxa_init_irq_common(NULL, irq_nr, fn);
}

#ifdef CONFIG_PM
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];

/* Save mask and priority registers, then mask all IRQs for suspend. */
static int pxa_irq_suspend(void)
{
        int i;

        for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);

                saved_icmr[i] = __raw_readl(base + ICMR);
                __raw_writel(0, base + ICMR);
        }

        if (cpu_has_ipr) {
                for (i = 0; i < pxa_internal_irq_nr; i++)
                        saved_ipr[i] = __raw_readl(pxa_irq_base + IPR(i));
        }

        return 0;
}

/* Restore mask/priority state saved in pxa_irq_suspend(). */
static void pxa_irq_resume(void)
{
        int i;

        for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);

                __raw_writel(saved_icmr[i], base + ICMR);
                __raw_writel(0, base + ICLR);
        }

        if (cpu_has_ipr)
                for (i = 0; i < pxa_internal_irq_nr; i++)
                        __raw_writel(saved_ipr[i], pxa_irq_base + IPR(i));

        __raw_writel(1, pxa_irq_base + ICCR);
}
#else
#define pxa_irq_suspend		NULL
#define pxa_irq_resume		NULL
#endif

struct syscore_ops pxa_irq_syscore_ops = {
        .suspend	= pxa_irq_suspend,
        .resume		= pxa_irq_resume,
};

#ifdef CONFIG_OF
static const struct of_device_id intc_ids[] __initconst = {
        { .compatible = "marvell,pxa-intc", },
        {}
};

/*
 * DT entry point: locate the interrupt controller node, read the IRQ count
 * and base address from the device tree, then run the common init.
 */
void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
{
        struct device_node *node;
        struct resource res;
        int ret;

        node = of_find_matching_node(NULL, intc_ids);
        if (!node) {
                pr_err("Failed to find interrupt controller in arch-pxa\n");
                return;
        }

        ret = of_property_read_u32(node, "marvell,intc-nr-irqs", &pxa_internal_irq_nr);
        if (ret) {
                pr_err("Not found marvell,intc-nr-irqs property\n");
                return;
        }

        ret = of_address_to_resource(node, 0, &res);
        if (ret < 0) {
                pr_err("No registers defined for node\n");
                return;
        }
        pxa_irq_base = io_p2v(res.start);

        cpu_has_ipr = of_property_read_bool(node, "marvell,intc-priority");

        ret = irq_alloc_descs(-1, 0, pxa_internal_irq_nr, 0);
        if (ret < 0) {
                pr_err("Failed to allocate IRQ numbers\n");
                return;
        }

        pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
}
#endif /* CONFIG_OF */
linux-master
arch/arm/mach-pxa/irq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/pxa300.c
 *
 * Code specific to PXA300/PXA310
 *
 * Copyright (C) 2007 Marvell International Ltd.
 *
 * 2007-08-21: eric miao <[email protected]>
 *             initial version
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/pxa/cpu.h>

#include "pxa300.h"

#include "generic.h"
#include "devices.h"

/* MFP (multi-function pin) register offsets for the PXA300. */
static struct mfp_addr_map pxa300_mfp_addr_map[] __initdata = {
        MFP_ADDR_X(GPIO0, GPIO2, 0x00b4),
        MFP_ADDR_X(GPIO3, GPIO26, 0x027c),
        MFP_ADDR_X(GPIO27, GPIO98, 0x0400),
        MFP_ADDR_X(GPIO99, GPIO127, 0x0600),
        MFP_ADDR_X(GPIO0_2, GPIO1_2, 0x0674),
        MFP_ADDR_X(GPIO2_2, GPIO6_2, 0x02dc),

        MFP_ADDR(nBE0, 0x0204),
        MFP_ADDR(nBE1, 0x0208),

        MFP_ADDR(nLUA, 0x0244),
        MFP_ADDR(nLLA, 0x0254),

        MFP_ADDR(DF_CLE_nOE, 0x0240),
        MFP_ADDR(DF_nRE_nOE, 0x0200),
        MFP_ADDR(DF_ALE_nWE, 0x020C),
        MFP_ADDR(DF_INT_RnB, 0x00C8),
        MFP_ADDR(DF_nCS0, 0x0248),
        MFP_ADDR(DF_nCS1, 0x0278),
        MFP_ADDR(DF_nWE, 0x00CC),

        MFP_ADDR(DF_ADDR0, 0x0210),
        MFP_ADDR(DF_ADDR1, 0x0214),
        MFP_ADDR(DF_ADDR2, 0x0218),
        MFP_ADDR(DF_ADDR3, 0x021C),

        MFP_ADDR(DF_IO0, 0x0220),
        MFP_ADDR(DF_IO1, 0x0228),
        MFP_ADDR(DF_IO2, 0x0230),
        MFP_ADDR(DF_IO3, 0x0238),
        MFP_ADDR(DF_IO4, 0x0258),
        MFP_ADDR(DF_IO5, 0x0260),
        MFP_ADDR(DF_IO6, 0x0268),
        MFP_ADDR(DF_IO7, 0x0270),
        MFP_ADDR(DF_IO8, 0x0224),
        MFP_ADDR(DF_IO9, 0x022C),
        MFP_ADDR(DF_IO10, 0x0234),
        MFP_ADDR(DF_IO11, 0x023C),
        MFP_ADDR(DF_IO12, 0x025C),
        MFP_ADDR(DF_IO13, 0x0264),
        MFP_ADDR(DF_IO14, 0x026C),
        MFP_ADDR(DF_IO15, 0x0274),

        MFP_ADDR_END,
};

/* override pxa300 MFP register addresses */
static struct mfp_addr_map pxa310_mfp_addr_map[] __initdata = {
        MFP_ADDR_X(GPIO30, GPIO98, 0x0418),
        MFP_ADDR_X(GPIO7_2, GPIO12_2, 0x052C),

        MFP_ADDR(ULPI_STP, 0x040C),
        MFP_ADDR(ULPI_NXT, 0x0410),
        MFP_ADDR(ULPI_DIR, 0x0414),

        MFP_ADDR_END,
};

/*
 * Register the MFP tables for PXA300/PXA310; the PXA310 entries are applied
 * on top of the PXA300 ones, overriding where both define a pin.
 */
static int __init pxa300_init(void)
{
        if (cpu_is_pxa300() || cpu_is_pxa310()) {
                mfp_init_base(io_p2v(MFPR_BASE));
                mfp_init_addr(pxa300_mfp_addr_map);
        }

        if (cpu_is_pxa310())
                mfp_init_addr(pxa310_mfp_addr_map);

        return 0;
}

core_initcall(pxa300_init);
linux-master
arch/arm/mach-pxa/pxa300.c
/*
 * am200epd.c -- Platform device for AM200 EPD kit
 *
 * Copyright (C) 2008, Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 *
 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
 *
 * This work was made possible by help and equipment support from E-Ink
 * Corporation. http://support.eink.com/community
 *
 * This driver is written to be used with the Metronome display controller.
 * on the AM200 EPD prototype kit/development kit with an E-Ink 800x600
 * Vizplex EPD on a Gumstix board using the Lyre interface board.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/gpio.h>

#include "pxa25x.h"
#include "gumstix.h"
#include <linux/platform_data/video-pxafb.h>

#include "generic.h"

#include <video/metronomefb.h>

/* Panel selection, settable as a module parameter (6, 8 or 97 = 9.7"). */
static unsigned int panel_type = 6;
static struct platform_device *am200_device;
static struct metronome_board am200_board;

static struct pxafb_mode_info am200_fb_mode_9inch7 = {
        .pixclock	= 40000,
        .xres		= 1200,
        .yres		= 842,
        .bpp		= 16,
        .hsync_len	= 2,
        .left_margin	= 2,
        .right_margin	= 2,
        .vsync_len	= 1,
        .upper_margin	= 2,
        .lower_margin	= 25,
        .sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mode_info am200_fb_mode_8inch = {
        .pixclock	= 40000,
        .xres		= 1088,
        .yres		= 791,
        .bpp		= 16,
        .hsync_len	= 28,
        .left_margin	= 8,
        .right_margin	= 30,
        .vsync_len	= 8,
        .upper_margin	= 10,
        .lower_margin	= 8,
        .sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mode_info am200_fb_mode_6inch = {
        .pixclock	= 40189,
        .xres		= 832,
        .yres		= 622,
        .bpp		= 16,
        .hsync_len	= 28,
        .left_margin	= 34,
        .right_margin	= 34,
        .vsync_len	= 25,
        .upper_margin	= 0,
        .lower_margin	= 2,
        .sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info am200_fb_info = {
        .modes		= &am200_fb_mode_6inch,
        .num_modes	= 1,
        .lcd_conn	= LCD_TYPE_COLOR_TFT | LCD_PCLK_EDGE_FALL | LCD_AC_BIAS_FREQ(24),
};

/* register offsets for gpio control */
#define LED_GPIO_PIN 51
#define STDBY_GPIO_PIN 48
#define RST_GPIO_PIN 49
#define RDY_GPIO_PIN 32
#define ERR_GPIO_PIN 17
#define PCBPWR_GPIO_PIN 16
static int gpios[] = { LED_GPIO_PIN , STDBY_GPIO_PIN , RST_GPIO_PIN, RDY_GPIO_PIN, ERR_GPIO_PIN, PCBPWR_GPIO_PIN };
static char *gpio_names[] = { "LED" , "STDBY" , "RST", "RDY", "ERR", "PCBPWR" };

/*
 * Claim and configure the Metronome control GPIOs.
 * On failure, frees any GPIOs already requested and returns the error.
 */
static int am200_init_gpio_regs(struct metronomefb_par *par)
{
        int i;
        int err;

        for (i = 0; i < ARRAY_SIZE(gpios); i++) {
                err = gpio_request(gpios[i], gpio_names[i]);
                if (err) {
                        dev_err(&am200_device->dev, "failed requesting " "gpio %s, err=%d\n", gpio_names[i], err);
                        goto err_req_gpio;
                }
        }

        gpio_direction_output(LED_GPIO_PIN, 0);
        gpio_direction_output(STDBY_GPIO_PIN, 0);
        gpio_direction_output(RST_GPIO_PIN, 0);

        gpio_direction_input(RDY_GPIO_PIN);
        gpio_direction_input(ERR_GPIO_PIN);

        gpio_direction_output(PCBPWR_GPIO_PIN, 0);

        return 0;

err_req_gpio:
        while (--i >= 0)
                gpio_free(gpios[i]);

        return err;
}

/* Release the RDY interrupt and all control GPIOs. */
static void am200_cleanup(struct metronomefb_par *par)
{
        int i;

        free_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), par);

        for (i = 0; i < ARRAY_SIZE(gpios); i++)
                gpio_free(gpios[i]);
}

/*
 * fb-registration notifier body: if the new framebuffer matches our mode,
 * record its memory and fb_info for metronomefb and pin the host fb driver.
 */
static int am200_share_video_mem(struct fb_info *info)
{
        /* rough check if this is our desired fb and not something else */
        if ((info->var.xres != am200_fb_info.modes->xres) || (info->var.yres != am200_fb_info.modes->yres))
                return 0;

        /* we've now been notified that we have our new fb */
        am200_board.metromem = info->screen_base;
        am200_board.host_fbinfo = info;

        /* try to refcount host drv since we are the consumer after this */
        if (!try_module_get(info->fbops->owner))
                return -ENODEV;

        return 0;
}

/* fb-unregistration counterpart: drop the reference taken above. */
static int am200_unshare_video_mem(struct fb_info *info)
{
        dev_dbg(&am200_device->dev, "ENTER %s\n", __func__);

        if (info != am200_board.host_fbinfo)
                return 0;

        module_put(am200_board.host_fbinfo->fbops->owner);

        return 0;
}

/* Dispatch framebuffer register/unregister events to the handlers above. */
static int am200_fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data)
{
        struct fb_event *evdata = data;
        struct fb_info *info = evdata->info;

        dev_dbg(&am200_device->dev, "ENTER %s\n", __func__);

        if (event == FB_EVENT_FB_REGISTERED)
                return am200_share_video_mem(info);
        else if (event == FB_EVENT_FB_UNREGISTERED)
                return am200_unshare_video_mem(info);

        return 0;
}

static struct notifier_block am200_fb_notif = {
        .notifier_call = am200_fb_notifier_callback,
};

/* this gets called as part of our init. these steps must be done now so
 * that we can use pxa_set_fb_info */
static void __init am200_presetup_fb(void)
{
        int fw;
        int fh;
        int padding_size;
        int totalsize;

        switch (panel_type) {
        case 6:
                am200_fb_info.modes = &am200_fb_mode_6inch;
                break;
        case 8:
                am200_fb_info.modes = &am200_fb_mode_8inch;
                break;
        case 97:
                am200_fb_info.modes = &am200_fb_mode_9inch7;
                break;
        default:
                dev_err(&am200_device->dev, "invalid panel_type selection," " setting to 6\n");
                am200_fb_info.modes = &am200_fb_mode_6inch;
                break;
        }

        /* the frame buffer is divided as follows:
           command | CRC | padding
           16kb waveform data | CRC | padding
           image data | CRC */

        fw = am200_fb_info.modes->xres;
        fh = am200_fb_info.modes->yres;

        /* waveform must be 16k + 2 for checksum */
        am200_board.wfm_size = roundup(16*1024 + 2, fw);

        padding_size = PAGE_SIZE + (4 * fw);

        /* total is 1 cmd , 1 wfm, padding and image */
        totalsize = fw + am200_board.wfm_size + padding_size + (fw*fh);

        /* save this off because we're manipulating fw after this and
         * we'll need it when we're ready to setup the framebuffer */
        am200_board.fw = fw;
        am200_board.fh = fh;

        /* the reason we do this adjustment is because we want to acquire
         * more framebuffer memory without imposing custom awareness on the
         * underlying pxafb driver */
        am200_fb_info.modes->yres = DIV_ROUND_UP(totalsize, fw);

        /* we divide since we told the LCD controller we're 16bpp */
        am200_fb_info.modes->xres /= 2;

        pxa_set_fb_info(NULL, &am200_fb_info);
}

/* this gets called by metronomefb as part of its init, in our case, we
 * have already completed initial framebuffer init in presetup_fb so we
 * can just setup the fb access pointers */
static int am200_setup_fb(struct metronomefb_par *par)
{
        int fw;
        int fh;

        fw = am200_board.fw;
        fh = am200_board.fh;

        /* metromem was set up by the notifier in share_video_mem so now
           we can use its value to calculate the other entries */
        par->metromem_cmd = (struct metromem_cmd *) am200_board.metromem;
        par->metromem_wfm = am200_board.metromem + fw;
        par->metromem_img = par->metromem_wfm + am200_board.wfm_size;
        par->metromem_img_csum = (u16 *) (par->metromem_img + (fw * fh));
        par->metromem_dma = am200_board.host_fbinfo->fix.smem_start;

        return 0;
}

/* Report the selected panel type back to metronomefb. */
static int am200_get_panel_type(void)
{
        return panel_type;
}

/* RDY-line interrupt: wake anyone waiting on the Metronome controller. */
static irqreturn_t am200_handle_irq(int irq, void *dev_id)
{
        struct metronomefb_par *par = dev_id;

        wake_up_interruptible(&par->waitq);
        return IRQ_HANDLED;
}

/* Hook the RDY GPIO's falling edge to am200_handle_irq. */
static int am200_setup_irq(struct fb_info *info)
{
        int ret;

        ret = request_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), am200_handle_irq, IRQF_TRIGGER_FALLING, "AM200", info->par);
        if (ret)
                dev_err(&am200_device->dev, "request_irq failed: %d\n", ret);

        return ret;
}

/* Drive the controller RST line. */
static void am200_set_rst(struct metronomefb_par *par, int state)
{
        gpio_set_value(RST_GPIO_PIN, state);
}

/* Drive the controller STDBY line. */
static void am200_set_stdby(struct metronomefb_par *par, int state)
{
        gpio_set_value(STDBY_GPIO_PIN, state);
}

/* Wait (uninterruptibly) up to 1s for the RDY line to go high. */
static int am200_wait_event(struct metronomefb_par *par)
{
        return wait_event_timeout(par->waitq, gpio_get_value(RDY_GPIO_PIN), HZ);
}

/* Interruptible variant of am200_wait_event(). */
static int am200_wait_event_intr(struct metronomefb_par *par)
{
        return wait_event_interruptible_timeout(par->waitq, gpio_get_value(RDY_GPIO_PIN), HZ);
}

static struct metronome_board am200_board = {
        .owner			= THIS_MODULE,
        .setup_irq		= am200_setup_irq,
        .setup_io		= am200_init_gpio_regs,
        .setup_fb		= am200_setup_fb,
        .set_rst		= am200_set_rst,
        .set_stdby		= am200_set_stdby,
        .met_wait_event		= am200_wait_event,
        .met_wait_event_intr	= am200_wait_event_intr,
        .get_panel_type		= am200_get_panel_type,
        .cleanup		= am200_cleanup,
};

static unsigned long am200_pin_config[] __initdata = {
        GPIO51_GPIO,
        GPIO49_GPIO,
        GPIO48_GPIO,
        GPIO32_GPIO,
        GPIO17_GPIO,
        GPIO16_GPIO,
};

/*
 * Board init: register the fb notifier, configure pins, load and bind the
 * metronomefb driver, then pre-configure the pxafb mode for the panel.
 */
int __init am200_init(void)
{
        int ret;

        /*
         * Before anything else, we request notification for any fb
         * creation events.
         *
         * FIXME: This is terrible and needs to be nuked. The notifier is used
         * to get at the fb base address from the boot splash fb driver, which
         * is then passed to metronomefb. Instead of metronomefb or this board
         * support file here figuring this out on their own.
         *
         * See also the #ifdef in fbmem.c.
         */
        fb_register_client(&am200_fb_notif);

        pxa2xx_mfp_config(ARRAY_AND_SIZE(am200_pin_config));

        /* request our platform independent driver */
        request_module("metronomefb");

        am200_device = platform_device_alloc("metronomefb", -1);
        if (!am200_device)
                return -ENOMEM;

        /* the am200_board that will be seen by metronomefb is a copy */
        platform_device_add_data(am200_device, &am200_board, sizeof(am200_board));

        /* this _add binds metronomefb to am200. metronomefb refcounts am200 */
        ret = platform_device_add(am200_device);

        if (ret) {
                platform_device_put(am200_device);
                fb_unregister_client(&am200_fb_notif);
                return ret;
        }

        am200_presetup_fb();

        return 0;
}

module_param(panel_type, uint, 0);
MODULE_PARM_DESC(panel_type, "Select the panel type: 6, 8, 97");

MODULE_DESCRIPTION("board driver for am200 metronome epd kit");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
linux-master
arch/arm/mach-pxa/am200epd.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-pxa/pxa25x.c * * Author: Nicolas Pitre * Created: Jun 15, 2001 * Copyright: MontaVista Software Inc. * * Code specific to PXA21x/25x/26x variants. * * Since this file should be linked before any other machine specific file, * the __initcall() here will be executed first. This serves as default * initialization stuff for PXA machines which can be overridden later if * need be. */ #include <linux/dmaengine.h> #include <linux/dma/pxa-dma.h> #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/platform_data/mmp_dma.h> #include <linux/soc/pxa/cpu.h> #include <linux/soc/pxa/smemc.h> #include <asm/mach/map.h> #include <asm/suspend.h> #include "irqs.h" #include "pxa25x.h" #include "reset.h" #include "pm.h" #include "addr-map.h" #include "smemc.h" #include "generic.h" #include "devices.h" /* * Various clock factors driven by the CCCR register. */ #ifdef CONFIG_PM #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] /* * List of global PXA peripheral registers to preserve. * More ones like CP and general purpose register values are preserved * with the stack pointer in sleep.S. 
*/ enum { SLEEP_SAVE_PSTR, SLEEP_SAVE_COUNT }; static void pxa25x_cpu_pm_save(unsigned long *sleep_save) { SAVE(PSTR); } static void pxa25x_cpu_pm_restore(unsigned long *sleep_save) { RESTORE(PSTR); } static void pxa25x_cpu_pm_enter(suspend_state_t state) { /* Clear reset status */ RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; switch (state) { case PM_SUSPEND_MEM: cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend); break; } } static int pxa25x_cpu_pm_prepare(void) { /* set resume return address */ PSPR = __pa_symbol(cpu_resume); return 0; } static void pxa25x_cpu_pm_finish(void) { /* ensure not to come back here if it wasn't intended */ PSPR = 0; } static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = { .save_count = SLEEP_SAVE_COUNT, .valid = suspend_valid_only_mem, .save = pxa25x_cpu_pm_save, .restore = pxa25x_cpu_pm_restore, .enter = pxa25x_cpu_pm_enter, .prepare = pxa25x_cpu_pm_prepare, .finish = pxa25x_cpu_pm_finish, }; static void __init pxa25x_init_pm(void) { pxa_cpu_pm_fns = &pxa25x_cpu_pm_fns; } #else static inline void pxa25x_init_pm(void) {} #endif /* PXA25x: supports wakeup from GPIO0..GPIO15 and RTC alarm */ static int pxa25x_set_wake(struct irq_data *d, unsigned int on) { int gpio = pxa_irq_to_gpio(d->irq); uint32_t mask = 0; if (gpio >= 0 && gpio < 85) return gpio_set_wake(gpio, on); if (d->irq == IRQ_RTCAlrm) { mask = PWER_RTC; goto set_pwer; } return -EINVAL; set_pwer: if (on) PWER |= mask; else PWER &=~mask; return 0; } void __init pxa25x_init_irq(void) { pxa_init_irq(32, pxa25x_set_wake); set_handle_irq(pxa25x_handle_irq); } static int __init __init pxa25x_dt_init_irq(struct device_node *node, struct device_node *parent) { pxa_dt_irq_init(pxa25x_set_wake); set_handle_irq(icip_handle_irq); return 0; } IRQCHIP_DECLARE(pxa25x_intc, "marvell,pxa-intc", pxa25x_dt_init_irq); static struct map_desc pxa25x_io_desc[] __initdata = { { /* Mem Ctl */ .virtual = (unsigned long)SMEMC_VIRT, .pfn = __phys_to_pfn(PXA2XX_SMEMC_BASE), .length = SMEMC_SIZE, .type = 
MT_DEVICE }, { /* UNCACHED_PHYS_0 */ .virtual = UNCACHED_PHYS_0, .pfn = __phys_to_pfn(0x00000000), .length = UNCACHED_PHYS_0_SIZE, .type = MT_DEVICE }, }; void __init pxa25x_map_io(void) { pxa_map_io(); iotable_init(ARRAY_AND_SIZE(pxa25x_io_desc)); pxa25x_get_clk_frequency_khz(1); } static struct pxa_gpio_platform_data pxa25x_gpio_info __initdata = { .irq_base = PXA_GPIO_TO_IRQ(0), .gpio_set_wake = gpio_set_wake, }; static struct platform_device *pxa25x_devices[] __initdata = { &pxa25x_device_udc, &pxa_device_pmu, &pxa_device_i2s, &sa1100_device_rtc, &pxa25x_device_ssp, &pxa25x_device_nssp, &pxa25x_device_assp, &pxa25x_device_pwm0, &pxa25x_device_pwm1, &pxa_device_asoc_platform, }; static const struct dma_slave_map pxa25x_slave_map[] = { /* PXA25x, PXA27x and PXA3xx common entries */ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) }, { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) }, { "pxa2xx-ac97", "pcm_pcm_aux_mono_out", PDMA_FILTER_PARAM(LOWEST, 10) }, { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) }, { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) }, { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 13) }, { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 14) }, { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 15) }, { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 16) }, { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) }, { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) }, { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) }, { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) }, /* PXA25x specific map */ { "pxa25x-ssp.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) }, { "pxa25x-ssp.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) }, { "pxa25x-nssp.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) }, { "pxa25x-nssp.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) }, { "pxa25x-nssp.2", "rx", PDMA_FILTER_PARAM(LOWEST, 23) }, { "pxa25x-nssp.2", "tx", PDMA_FILTER_PARAM(LOWEST, 24) }, }; static struct 
mmp_dma_platdata pxa25x_dma_pdata = { .dma_channels = 16, .nb_requestors = 40, .slave_map = pxa25x_slave_map, .slave_map_cnt = ARRAY_SIZE(pxa25x_slave_map), }; static int __init pxa25x_init(void) { int ret = 0; if (cpu_is_pxa25x()) { pxa_register_wdt(RCSR); pxa25x_init_pm(); register_syscore_ops(&pxa_irq_syscore_ops); register_syscore_ops(&pxa2xx_mfp_syscore_ops); if (!of_have_populated_dt()) { pxa2xx_set_dmac_info(&pxa25x_dma_pdata); pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info); ret = platform_add_devices(pxa25x_devices, ARRAY_SIZE(pxa25x_devices)); } } return ret; } postcore_initcall(pxa25x_init);
linux-master
arch/arm/mach-pxa/pxa25x.c
// SPDX-License-Identifier: GPL-2.0-only /* * Support for Sharp SL-Cxx00 Series of PDAs * Models: SL-C3000 (Spitz), SL-C1000 (Akita) and SL-C3100 (Borzoi) * * Copyright (c) 2005 Richard Purdie * * Based on Sharp's 2.4 kernel patches/lubbock.c */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio_keys.h> #include <linux/gpio.h> #include <linux/gpio/machine.h> #include <linux/leds.h> #include <linux/i2c.h> #include <linux/platform_data/i2c-pxa.h> #include <linux/platform_data/pca953x.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/spi/corgi_lcd.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/mtd/sharpsl.h> #include <linux/mtd/physmap.h> #include <linux/input-event-codes.h> #include <linux/input/matrix_keypad.h> #include <linux/regulator/machine.h> #include <linux/io.h> #include <linux/reboot.h> #include <linux/memblock.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/sharpsl_param.h> #include <asm/hardware/scoop.h> #include "pxa27x.h" #include "pxa27x-udc.h" #include "reset.h" #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/video-pxafb.h> #include "spitz.h" #include "sharpsl_pm.h" #include "smemc.h" #include "generic.h" #include "devices.h" /****************************************************************************** * Pin configuration ******************************************************************************/ static unsigned long spitz_pin_config[] __initdata = { /* Chip Selects */ GPIO78_nCS_2, /* SCOOP #2 */ GPIO79_nCS_3, /* NAND */ GPIO80_nCS_4, /* SCOOP #1 */ /* LCD - 16bpp Active TFT */ GPIOxx_LCD_TFT_16BPP, /* PC Card */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO85_nPCE_1, GPIO54_nPCE_2, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, GPIO104_PSKTSEL, /* I2S */ GPIO28_I2S_BITCLK_OUT, GPIO29_I2S_SDATA_IN, GPIO30_I2S_SDATA_OUT, 
GPIO31_I2S_SYNC, /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* GPIOs */ GPIO9_GPIO, /* SPITZ_GPIO_nSD_DETECT */ GPIO16_GPIO, /* SPITZ_GPIO_SYNC */ GPIO81_GPIO, /* SPITZ_GPIO_nSD_WP */ GPIO41_GPIO, /* SPITZ_GPIO_USB_CONNECT */ GPIO37_GPIO, /* SPITZ_GPIO_USB_HOST */ GPIO35_GPIO, /* SPITZ_GPIO_USB_DEVICE */ GPIO22_GPIO, /* SPITZ_GPIO_HSYNC */ GPIO94_GPIO, /* SPITZ_GPIO_CF_CD */ GPIO105_GPIO, /* SPITZ_GPIO_CF_IRQ */ GPIO106_GPIO, /* SPITZ_GPIO_CF2_IRQ */ /* GPIO matrix keypad */ GPIO88_GPIO, /* column 0 */ GPIO23_GPIO, /* column 1 */ GPIO24_GPIO, /* column 2 */ GPIO25_GPIO, /* column 3 */ GPIO26_GPIO, /* column 4 */ GPIO27_GPIO, /* column 5 */ GPIO52_GPIO, /* column 6 */ GPIO103_GPIO, /* column 7 */ GPIO107_GPIO, /* column 8 */ GPIO108_GPIO, /* column 9 */ GPIO114_GPIO, /* column 10 */ GPIO12_GPIO, /* row 0 */ GPIO17_GPIO, /* row 1 */ GPIO91_GPIO, /* row 2 */ GPIO34_GPIO, /* row 3 */ GPIO36_GPIO, /* row 4 */ GPIO38_GPIO, /* row 5 */ GPIO39_GPIO, /* row 6 */ /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, /* SPITZ_GPIO_KEY_INT */ GPIO1_GPIO | WAKEUP_ON_EDGE_FALL, /* SPITZ_GPIO_RESET */ }; /****************************************************************************** * Scoop GPIO expander ******************************************************************************/ #if defined(CONFIG_SHARP_SCOOP) || defined(CONFIG_SHARP_SCOOP_MODULE) /* SCOOP Device #1 */ static struct resource spitz_scoop_1_resources[] = { [0] = { .start = 0x10800000, .end = 0x10800fff, .flags = IORESOURCE_MEM, }, }; static struct scoop_config spitz_scoop_1_setup = { .io_dir = SPITZ_SCP_IO_DIR, .io_out = SPITZ_SCP_IO_OUT, .suspend_clr = SPITZ_SCP_SUS_CLR, .suspend_set = SPITZ_SCP_SUS_SET, .gpio_base = SPITZ_SCP_GPIO_BASE, }; struct platform_device spitz_scoop_1_device = { .name = "sharp-scoop", .id = 0, .dev = { .platform_data = &spitz_scoop_1_setup, }, .num_resources = 
ARRAY_SIZE(spitz_scoop_1_resources), .resource = spitz_scoop_1_resources, }; /* SCOOP Device #2 */ static struct resource spitz_scoop_2_resources[] = { [0] = { .start = 0x08800040, .end = 0x08800fff, .flags = IORESOURCE_MEM, }, }; static struct scoop_config spitz_scoop_2_setup = { .io_dir = SPITZ_SCP2_IO_DIR, .io_out = SPITZ_SCP2_IO_OUT, .suspend_clr = SPITZ_SCP2_SUS_CLR, .suspend_set = SPITZ_SCP2_SUS_SET, .gpio_base = SPITZ_SCP2_GPIO_BASE, }; struct platform_device spitz_scoop_2_device = { .name = "sharp-scoop", .id = 1, .dev = { .platform_data = &spitz_scoop_2_setup, }, .num_resources = ARRAY_SIZE(spitz_scoop_2_resources), .resource = spitz_scoop_2_resources, }; static void __init spitz_scoop_init(void) { platform_device_register(&spitz_scoop_1_device); /* Akita doesn't have the second SCOOP chip */ if (!machine_is_akita()) platform_device_register(&spitz_scoop_2_device); } /* Power control is shared with between one of the CF slots and SD */ static void __maybe_unused spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr) { unsigned short cpr; unsigned long flags; if (new_cpr & 0x7) { gpio_set_value(SPITZ_GPIO_CF_POWER, 1); mdelay(5); } local_irq_save(flags); cpr = read_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR); if (enable & new_cpr) cpr |= new_cpr; else cpr &= ~enable; write_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR, cpr); local_irq_restore(flags); if (!(cpr & 0x7)) { mdelay(1); gpio_set_value(SPITZ_GPIO_CF_POWER, 0); } } #else static inline void spitz_scoop_init(void) {} static inline void spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr) {} #endif /****************************************************************************** * PCMCIA ******************************************************************************/ #if defined(CONFIG_PCMCIA_PXA2XX) || defined(CONFIG_PCMCIA_PXA2XX_MODULE) static void spitz_pcmcia_pwr(struct device *scoop, uint16_t cpr, int nr) { /* Only need to override behaviour for slot 0 */ if (nr == 0) spitz_card_pwr_ctrl( cpr & 
(SCOOP_CPR_CF_3V | SCOOP_CPR_CF_XV), cpr); else write_scoop_reg(scoop, SCOOP_CPR, cpr); } static struct scoop_pcmcia_dev spitz_pcmcia_scoop[] = { { .dev = &spitz_scoop_1_device.dev, .irq = SPITZ_IRQ_GPIO_CF_IRQ, .cd_irq = SPITZ_IRQ_GPIO_CF_CD, .cd_irq_str = "PCMCIA0 CD", }, { .dev = &spitz_scoop_2_device.dev, .irq = SPITZ_IRQ_GPIO_CF2_IRQ, .cd_irq = -1, }, }; static struct scoop_pcmcia_config spitz_pcmcia_config = { .devs = &spitz_pcmcia_scoop[0], .num_devs = 2, .power_ctrl = spitz_pcmcia_pwr, }; static void __init spitz_pcmcia_init(void) { /* Akita has only one PCMCIA slot used */ if (machine_is_akita()) spitz_pcmcia_config.num_devs = 1; platform_scoop_config = &spitz_pcmcia_config; } #else static inline void spitz_pcmcia_init(void) {} #endif /****************************************************************************** * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_MATRIX) || defined(CONFIG_KEYBOARD_MATRIX_MODULE) #define SPITZ_KEY_CALENDAR KEY_F1 #define SPITZ_KEY_ADDRESS KEY_F2 #define SPITZ_KEY_FN KEY_F3 #define SPITZ_KEY_CANCEL KEY_F4 #define SPITZ_KEY_EXOK KEY_F5 #define SPITZ_KEY_EXCANCEL KEY_F6 #define SPITZ_KEY_EXJOGDOWN KEY_F7 #define SPITZ_KEY_EXJOGUP KEY_F8 #define SPITZ_KEY_JAP1 KEY_LEFTALT #define SPITZ_KEY_JAP2 KEY_RIGHTCTRL #define SPITZ_KEY_SYNC KEY_F9 #define SPITZ_KEY_MAIL KEY_F10 #define SPITZ_KEY_OK KEY_F11 #define SPITZ_KEY_MENU KEY_F12 static const uint32_t spitz_keymap[] = { KEY(0, 0, KEY_LEFTCTRL), KEY(0, 1, KEY_1), KEY(0, 2, KEY_3), KEY(0, 3, KEY_5), KEY(0, 4, KEY_6), KEY(0, 5, KEY_7), KEY(0, 6, KEY_9), KEY(0, 7, KEY_0), KEY(0, 8, KEY_BACKSPACE), KEY(0, 9, SPITZ_KEY_EXOK), /* EXOK */ KEY(0, 10, SPITZ_KEY_EXCANCEL), /* EXCANCEL */ KEY(1, 1, KEY_2), KEY(1, 2, KEY_4), KEY(1, 3, KEY_R), KEY(1, 4, KEY_Y), KEY(1, 5, KEY_8), KEY(1, 6, KEY_I), KEY(1, 7, KEY_O), KEY(1, 8, KEY_P), KEY(1, 9, SPITZ_KEY_EXJOGDOWN), /* EXJOGDOWN */ KEY(1, 10, SPITZ_KEY_EXJOGUP), /* EXJOGUP */ 
KEY(2, 0, KEY_TAB), KEY(2, 1, KEY_Q), KEY(2, 2, KEY_E), KEY(2, 3, KEY_T), KEY(2, 4, KEY_G), KEY(2, 5, KEY_U), KEY(2, 6, KEY_J), KEY(2, 7, KEY_K), KEY(3, 0, SPITZ_KEY_ADDRESS), /* ADDRESS */ KEY(3, 1, KEY_W), KEY(3, 2, KEY_S), KEY(3, 3, KEY_F), KEY(3, 4, KEY_V), KEY(3, 5, KEY_H), KEY(3, 6, KEY_M), KEY(3, 7, KEY_L), KEY(3, 9, KEY_RIGHTSHIFT), KEY(4, 0, SPITZ_KEY_CALENDAR), /* CALENDAR */ KEY(4, 1, KEY_A), KEY(4, 2, KEY_D), KEY(4, 3, KEY_C), KEY(4, 4, KEY_B), KEY(4, 5, KEY_N), KEY(4, 6, KEY_DOT), KEY(4, 8, KEY_ENTER), KEY(4, 9, KEY_LEFTSHIFT), KEY(5, 0, SPITZ_KEY_MAIL), /* MAIL */ KEY(5, 1, KEY_Z), KEY(5, 2, KEY_X), KEY(5, 3, KEY_MINUS), KEY(5, 4, KEY_SPACE), KEY(5, 5, KEY_COMMA), KEY(5, 7, KEY_UP), KEY(5, 10, SPITZ_KEY_FN), /* FN */ KEY(6, 0, KEY_SYSRQ), KEY(6, 1, SPITZ_KEY_JAP1), /* JAP1 */ KEY(6, 2, SPITZ_KEY_JAP2), /* JAP2 */ KEY(6, 3, SPITZ_KEY_CANCEL), /* CANCEL */ KEY(6, 4, SPITZ_KEY_OK), /* OK */ KEY(6, 5, SPITZ_KEY_MENU), /* MENU */ KEY(6, 6, KEY_LEFT), KEY(6, 7, KEY_DOWN), KEY(6, 8, KEY_RIGHT), }; static const struct matrix_keymap_data spitz_keymap_data = { .keymap = spitz_keymap, .keymap_size = ARRAY_SIZE(spitz_keymap), }; static const uint32_t spitz_row_gpios[] = { 12, 17, 91, 34, 36, 38, 39 }; static const uint32_t spitz_col_gpios[] = { 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 }; static struct matrix_keypad_platform_data spitz_mkp_pdata = { .keymap_data = &spitz_keymap_data, .row_gpios = spitz_row_gpios, .col_gpios = spitz_col_gpios, .num_row_gpios = ARRAY_SIZE(spitz_row_gpios), .num_col_gpios = ARRAY_SIZE(spitz_col_gpios), .col_scan_delay_us = 10, .debounce_ms = 10, .wakeup = 1, }; static struct platform_device spitz_mkp_device = { .name = "matrix-keypad", .id = -1, .dev = { .platform_data = &spitz_mkp_pdata, }, }; static void __init spitz_mkp_init(void) { platform_device_register(&spitz_mkp_device); } #else static inline void spitz_mkp_init(void) {} #endif /****************************************************************************** * GPIO keys 
******************************************************************************/ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button spitz_gpio_keys[] = { { .type = EV_PWR, .code = KEY_SUSPEND, .gpio = SPITZ_GPIO_ON_KEY, .desc = "On Off", .wakeup = 1, }, /* Two buttons detecting the lid state */ { .type = EV_SW, .code = 0, .gpio = SPITZ_GPIO_SWA, .desc = "Display Down", }, { .type = EV_SW, .code = 1, .gpio = SPITZ_GPIO_SWB, .desc = "Lid Closed", }, }; static struct gpio_keys_platform_data spitz_gpio_keys_platform_data = { .buttons = spitz_gpio_keys, .nbuttons = ARRAY_SIZE(spitz_gpio_keys), }; static struct platform_device spitz_gpio_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &spitz_gpio_keys_platform_data, }, }; static void __init spitz_keys_init(void) { platform_device_register(&spitz_gpio_keys_device); } #else static inline void spitz_keys_init(void) {} #endif /****************************************************************************** * LEDs ******************************************************************************/ #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) static struct gpio_led spitz_gpio_leds[] = { { .name = "spitz:amber:charge", .default_trigger = "sharpsl-charge", .gpio = SPITZ_GPIO_LED_ORANGE, }, { .name = "spitz:green:hddactivity", .default_trigger = "disk-activity", .gpio = SPITZ_GPIO_LED_GREEN, }, }; static struct gpio_led_platform_data spitz_gpio_leds_info = { .leds = spitz_gpio_leds, .num_leds = ARRAY_SIZE(spitz_gpio_leds), }; static struct platform_device spitz_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &spitz_gpio_leds_info, }, }; static void __init spitz_leds_init(void) { platform_device_register(&spitz_led_device); } #else static inline void spitz_leds_init(void) {} #endif /****************************************************************************** * SSP Devices 
******************************************************************************/ #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) static void spitz_ads7846_wait_for_hsync(void) { while (gpio_get_value(SPITZ_GPIO_HSYNC)) cpu_relax(); while (!gpio_get_value(SPITZ_GPIO_HSYNC)) cpu_relax(); } static struct ads7846_platform_data spitz_ads7846_info = { .model = 7846, .vref_delay_usecs = 100, .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1024, .wait_for_sync = spitz_ads7846_wait_for_hsync, }; static struct gpiod_lookup_table spitz_ads7846_gpio_table = { .dev_id = "spi2.0", .table = { GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_TP_INT, "pendown", GPIO_ACTIVE_LOW), { } }, }; static struct gpiod_lookup_table spitz_lcdcon_gpio_table = { .dev_id = "spi2.1", .table = { GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_CONT, "BL_CONT", GPIO_ACTIVE_LOW), GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_ON, "BL_ON", GPIO_ACTIVE_HIGH), { }, }, }; static struct gpiod_lookup_table akita_lcdcon_gpio_table = { .dev_id = "spi2.1", .table = { GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_CONT, "BL_CONT", GPIO_ACTIVE_LOW), GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_ON, "BL_ON", GPIO_ACTIVE_HIGH), { }, }, }; static struct corgi_lcd_platform_data spitz_lcdcon_info = { .init_mode = CORGI_LCD_MODE_VGA, .max_intensity = 0x2f, .default_intensity = 0x1f, .limit_mask = 0x0b, .kick_battery = sharpsl_battery_kick, }; static struct spi_board_info spitz_spi_devices[] = { { .modalias = "ads7846", .max_speed_hz = 1200000, .bus_num = 2, .chip_select = 0, .platform_data = &spitz_ads7846_info, .irq = PXA_GPIO_TO_IRQ(SPITZ_GPIO_TP_INT), }, { .modalias = "corgi-lcd", .max_speed_hz = 50000, .bus_num = 2, .chip_select = 1, .platform_data = &spitz_lcdcon_info, }, { .modalias = "max1111", .max_speed_hz = 450000, .bus_num = 2, .chip_select = 2, }, }; static struct pxa2xx_spi_controller spitz_spi_info = { .num_chipselect = 3, }; static struct gpiod_lookup_table spitz_spi_gpio_table = { .dev_id = 
"spi2", .table = { GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_MAX1111_CS, "cs", 2, GPIO_ACTIVE_LOW), { }, }, }; static void __init spitz_spi_init(void) { if (machine_is_akita()) gpiod_add_lookup_table(&akita_lcdcon_gpio_table); else gpiod_add_lookup_table(&spitz_lcdcon_gpio_table); gpiod_add_lookup_table(&spitz_ads7846_gpio_table); gpiod_add_lookup_table(&spitz_spi_gpio_table); pxa2xx_set_spi_info(2, &spitz_spi_info); spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices)); } #else static inline void spitz_spi_init(void) {} #endif /****************************************************************************** * SD/MMC card controller ******************************************************************************/ #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) /* * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to * give the card a chance to fully insert/eject. 
*/ static int spitz_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data* p_d = dev->platform_data; if ((1 << vdd) & p_d->ocr_mask) spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V); else spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0); return 0; } static struct pxamci_platform_data spitz_mci_platform_data = { .detect_delay_ms = 250, .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .setpower = spitz_mci_setpower, }; static struct gpiod_lookup_table spitz_mci_gpio_table = { .dev_id = "pxa2xx-mci.0", .table = { GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_nSD_DETECT, "cd", GPIO_ACTIVE_LOW), GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_nSD_WP, "wp", GPIO_ACTIVE_LOW), { }, }, }; static void __init spitz_mmc_init(void) { gpiod_add_lookup_table(&spitz_mci_gpio_table); pxa_set_mci_info(&spitz_mci_platform_data); } #else static inline void spitz_mmc_init(void) {} #endif /****************************************************************************** * USB Host ******************************************************************************/ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static int spitz_ohci_init(struct device *dev) { int err; err = gpio_request(SPITZ_GPIO_USB_HOST, "USB_HOST"); if (err) return err; /* Only Port 2 is connected, setup USB Port 2 Output Control Register */ UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE; return gpio_direction_output(SPITZ_GPIO_USB_HOST, 1); } static void spitz_ohci_exit(struct device *dev) { gpio_free(SPITZ_GPIO_USB_HOST); } static struct pxaohci_platform_data spitz_ohci_platform_data = { .port_mode = PMM_NPS_MODE, .init = spitz_ohci_init, .exit = spitz_ohci_exit, .flags = ENABLE_PORT_ALL | NO_OC_PROTECTION, .power_budget = 150, }; static void __init spitz_uhc_init(void) { pxa_set_ohci_info(&spitz_ohci_platform_data); } #else static inline void spitz_uhc_init(void) {} #endif /****************************************************************************** * Framebuffer 
******************************************************************************/ #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct pxafb_mode_info spitz_pxafb_modes[] = { { .pixclock = 19231, .xres = 480, .yres = 640, .bpp = 16, .hsync_len = 40, .left_margin = 46, .right_margin = 125, .vsync_len = 3, .upper_margin = 1, .lower_margin = 0, .sync = 0, }, { .pixclock = 134617, .xres = 240, .yres = 320, .bpp = 16, .hsync_len = 20, .left_margin = 20, .right_margin = 46, .vsync_len = 2, .upper_margin = 1, .lower_margin = 0, .sync = 0, }, }; static struct pxafb_mach_info spitz_pxafb_info = { .modes = spitz_pxafb_modes, .num_modes = ARRAY_SIZE(spitz_pxafb_modes), .fixed_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING, }; static void __init spitz_lcd_init(void) { pxa_set_fb_info(NULL, &spitz_pxafb_info); } #else static inline void spitz_lcd_init(void) {} #endif /****************************************************************************** * NAND Flash ******************************************************************************/ #if defined(CONFIG_MTD_NAND_SHARPSL) || defined(CONFIG_MTD_NAND_SHARPSL_MODULE) static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; static struct nand_bbt_descr spitz_nand_bbt = { .options = 0, .offs = 4, .len = 2, .pattern = scan_ff_pattern }; static int akita_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { if (section > 12) return -ERANGE; switch (section % 3) { case 0: oobregion->offset = 5; oobregion->length = 1; break; case 1: oobregion->offset = 1; oobregion->length = 3; break; case 2: oobregion->offset = 6; oobregion->length = 2; break; } oobregion->offset += (section / 3) * 0x10; return 0; } static int akita_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { if (section) return -ERANGE; oobregion->offset = 8; oobregion->length = 9; return 0; } static const struct mtd_ooblayout_ops akita_ooblayout_ops = { .ecc = 
akita_ooblayout_ecc, .free = akita_ooblayout_free, }; static const char * const probes[] = { "cmdlinepart", "ofpart", "sharpslpart", NULL, }; static struct sharpsl_nand_platform_data spitz_nand_pdata = { .badblock_pattern = &spitz_nand_bbt, .part_parsers = probes, }; static struct resource spitz_nand_resources[] = { { .start = PXA_CS3_PHYS, .end = PXA_CS3_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device spitz_nand_device = { .name = "sharpsl-nand", .id = -1, .resource = spitz_nand_resources, .num_resources = ARRAY_SIZE(spitz_nand_resources), .dev = { .platform_data = &spitz_nand_pdata, } }; static void __init spitz_nand_init(void) { if (machine_is_akita() || machine_is_borzoi()) { spitz_nand_bbt.len = 1; spitz_nand_pdata.ecc_layout = &akita_ooblayout_ops; } platform_device_register(&spitz_nand_device); } #else static inline void spitz_nand_init(void) {} #endif /****************************************************************************** * NOR Flash ******************************************************************************/ #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition spitz_rom_parts[] = { { .name ="Boot PROM Filesystem", .offset = 0x00140000, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data spitz_rom_data = { .width = 2, .nr_parts = ARRAY_SIZE(spitz_rom_parts), .parts = spitz_rom_parts, }; static struct resource spitz_rom_resources[] = { { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_8M - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device spitz_rom_device = { .name = "physmap-flash", .id = -1, .resource = spitz_rom_resources, .num_resources = ARRAY_SIZE(spitz_rom_resources), .dev = { .platform_data = &spitz_rom_data, }, }; static void __init spitz_nor_init(void) { platform_device_register(&spitz_rom_device); } #else static inline void spitz_nor_init(void) {} #endif /****************************************************************************** * 
I2C devices ******************************************************************************/ #if defined(CONFIG_I2C_PXA) || defined(CONFIG_I2C_PXA_MODULE) static struct pca953x_platform_data akita_pca953x_pdata = { .gpio_base = AKITA_IOEXP_GPIO_BASE, }; static struct i2c_board_info spitz_i2c_devs[] = { { .type = "wm8750", .addr = 0x1b, }, { .type = "max7310", .addr = 0x18, .platform_data = &akita_pca953x_pdata, }, }; static struct regulator_consumer_supply isl6271a_consumers[] = { REGULATOR_SUPPLY("vcc_core", NULL), }; static struct regulator_init_data isl6271a_info[] = { { .constraints = { .name = "vcc_core range", .min_uV = 850000, .max_uV = 1600000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .consumer_supplies = isl6271a_consumers, .num_consumer_supplies = ARRAY_SIZE(isl6271a_consumers), } }; static struct i2c_board_info spitz_pi2c_devs[] = { { .type = "isl6271a", .addr = 0x0c, .platform_data = &isl6271a_info, }, }; static void __init spitz_i2c_init(void) { int size = ARRAY_SIZE(spitz_i2c_devs); /* Only Akita has the max7310 chip */ if (!machine_is_akita()) size--; pxa_set_i2c_info(NULL); pxa27x_set_i2c_power_info(NULL); i2c_register_board_info(0, spitz_i2c_devs, size); i2c_register_board_info(1, ARRAY_AND_SIZE(spitz_pi2c_devs)); } #else static inline void spitz_i2c_init(void) {} #endif static struct gpiod_lookup_table spitz_audio_gpio_table = { .dev_id = "spitz-audio", .table = { GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE, "mute-l", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE, "mute-r", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("sharp-scoop.1", SPITZ_GPIO_MIC_BIAS - SPITZ_SCP2_GPIO_BASE, "mic", GPIO_ACTIVE_HIGH), { }, }, }; static struct gpiod_lookup_table akita_audio_gpio_table = { .dev_id = "spitz-audio", .table = { GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE, "mute-l", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE, 
"mute-r", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("i2c-max7310", AKITA_GPIO_MIC_BIAS - AKITA_IOEXP_GPIO_BASE, "mic", GPIO_ACTIVE_HIGH), { }, }, }; /****************************************************************************** * Audio devices ******************************************************************************/ static inline void spitz_audio_init(void) { if (machine_is_akita()) gpiod_add_lookup_table(&akita_audio_gpio_table); else gpiod_add_lookup_table(&spitz_audio_gpio_table); platform_device_register_simple("spitz-audio", -1, NULL, 0); } /****************************************************************************** * Machine init ******************************************************************************/ static void spitz_poweroff(void) { pxa_restart(REBOOT_GPIO, NULL); } static void spitz_restart(enum reboot_mode mode, const char *cmd) { uint32_t msc0 = __raw_readl(MSC0); /* Bootloader magic for a reboot */ if ((msc0 & 0xffff0000) == 0x7ff00000) __raw_writel((msc0 & 0xffff) | 0x7ee00000, MSC0); spitz_poweroff(); } static void __init spitz_init(void) { init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0); pm_power_off = spitz_poweroff; PMCR = 0x00; /* Stop 3.6MHz and drive HIGH to PCMCIA and CS */ PCFR |= PCFR_OPDE; pxa2xx_mfp_config(ARRAY_AND_SIZE(spitz_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); spitz_spi_init(); spitz_scoop_init(); spitz_mkp_init(); spitz_keys_init(); spitz_leds_init(); spitz_mmc_init(); spitz_pcmcia_init(); spitz_uhc_init(); spitz_lcd_init(); spitz_nor_init(); spitz_nand_init(); spitz_i2c_init(); spitz_audio_init(); regulator_has_full_constraints(); } static void __init spitz_fixup(struct tag *tags, char **cmdline) { sharpsl_save_param(); memblock_add(0xa0000000, SZ_64M); } #ifdef CONFIG_MACH_SPITZ MACHINE_START(SPITZ, "SHARP Spitz") .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .init_machine = spitz_init, .init_time = pxa_timer_init, 
.restart = spitz_restart, MACHINE_END #endif #ifdef CONFIG_MACH_BORZOI MACHINE_START(BORZOI, "SHARP Borzoi") .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .init_machine = spitz_init, .init_time = pxa_timer_init, .restart = spitz_restart, MACHINE_END #endif #ifdef CONFIG_MACH_AKITA MACHINE_START(AKITA, "SHARP Akita") .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .init_machine = spitz_init, .init_time = pxa_timer_init, .restart = spitz_restart, MACHINE_END #endif
linux-master
arch/arm/mach-pxa/spitz.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/mfp-pxa2xx.c
 *
 * PXA2xx pin mux configuration support
 *
 * The GPIOs on PXA2xx can be configured as one of many alternate
 * functions, this is by concept similar to the MFP configuration
 * on PXA3xx, what's more important, the low power pin state and
 * wakeup detection are also supported by the same framework.
 */
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/soc/pxa/cpu.h>

#include "pxa2xx-regs.h"
#include "mfp-pxa2xx.h"
#include "mfp-pxa27x.h"
#include "generic.h"

/* PGSR: per-bank sleep-state level registers, 4 bytes apart */
#define PGSR(x)		__REG2(0x40F00020, (x) << 2)
/* GAFR: alternate-function registers; each bank has a Lower and an Upper
 * half (16 pins each, 2 bits per pin), 8 bytes apart per bank.
 */
#define __GAFR(u, x)	__REG2((u) ? 0x40E00058 : 0x40E00054, (x) << 3)
#define GAFR_L(x)	__GAFR(0, x)
#define GAFR_U(x)	__GAFR(1, x)

/* Banks 0..2 are 4 bytes apart; bank 3 (and beyond) starts at 0x100 */
#define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
#define GPLR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5))
#define GPDR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c)
#define GPSR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x18)
#define GPCR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x24)

/* dedicated PWER wakeup-enable bit for GPIO35 */
#define PWER_WE35	(1 << 24)

/*
 * Per-GPIO bookkeeping for run-time mux/direction and low-power/wakeup
 * configuration.  NOTE: this local struct shadows the <linux/gpio.h>
 * gpio_desc name on purpose — it is private to this file.
 */
struct gpio_desc {
	unsigned	valid		: 1;	/* pin exists on this SoC */
	unsigned	can_wakeup	: 1;	/* has a PWER/PKWR wakeup bit */
	unsigned	keypad_gpio	: 1;	/* wakeup routed via PKWR */
	unsigned	dir_inverted	: 1;	/* GPDR bit sense inverted (PXA26x 86+) */
	unsigned int	mask; /* bit mask in PWER or PKWR */
	unsigned int	mux_mask; /* bit mask of muxed gpio bits, 0 if no mux */
	unsigned long	config;	/* last MFP config word applied */
};

static struct gpio_desc gpio_desc[MFP_PIN_GPIO127 + 1];

/* desired per-bank GPDR contents for low-power mode, applied at suspend */
static unsigned long gpdr_lpm[4];

/*
 * Apply one MFP config word 'c' to 'gpio': program the alternate
 * function (GAFR), run-time direction (GPDR), low-power level (PGSR)
 * and record the low-power direction in gpdr_lpm[].
 *
 * Returns 0, or -EINVAL for an invalid alternate function or an
 * impossible wakeup request.  Caller must hold IRQs disabled.
 */
static int __mfp_config_gpio(unsigned gpio, unsigned long c)
{
	unsigned long gafr, mask = GPIO_bit(gpio);
	int bank = gpio_to_bank(gpio);
	int uorl = !!(gpio & 0x10); /* GAFRx_U or GAFRx_L ? */
	int shft = (gpio & 0xf) << 1;
	int fn = MFP_AF(c);
	int is_out = (c & MFP_DIR_OUT) ? 1 : 0;

	if (fn > 3)
		return -EINVAL;

	/* alternate function and direction at run-time */
	gafr = (uorl == 0) ? GAFR_L(bank) : GAFR_U(bank);
	gafr = (gafr & ~(0x3 << shft)) | (fn << shft);

	if (uorl == 0)
		GAFR_L(bank) = gafr;
	else
		GAFR_U(bank) = gafr;

	/* XOR with dir_inverted handles the PXA26x pins whose GPDR sense
	 * is inverted (see pxa25x_mfp_init)
	 */
	if (is_out ^ gpio_desc[gpio].dir_inverted)
		GPDR(gpio) |= mask;
	else
		GPDR(gpio) &= ~mask;

	/* alternate function and direction at low power mode */
	switch (c & MFP_LPM_STATE_MASK) {
	case MFP_LPM_DRIVE_HIGH:
		PGSR(bank) |= mask;
		is_out = 1;
		break;
	case MFP_LPM_DRIVE_LOW:
		PGSR(bank) &= ~mask;
		is_out = 1;
		break;
	case MFP_LPM_INPUT:
	case MFP_LPM_DEFAULT:
		break;
	default:
		/* warning and fall through, treat as MFP_LPM_DEFAULT */
		pr_warn("%s: GPIO%d: unsupported low power mode\n",
			__func__, gpio);
		break;
	}

	/* is_out may have been forced to 1 above for drive-high/low LPM */
	if (is_out ^ gpio_desc[gpio].dir_inverted)
		gpdr_lpm[bank] |= mask;
	else
		gpdr_lpm[bank] &= ~mask;

	/* give early warning if MFP_LPM_CAN_WAKEUP is set on the
	 * configurations of those pins not able to wakeup
	 */
	if ((c & MFP_LPM_CAN_WAKEUP) && !gpio_desc[gpio].can_wakeup) {
		pr_warn("%s: GPIO%d unable to wakeup\n", __func__, gpio);
		return -EINVAL;
	}

	if ((c & MFP_LPM_CAN_WAKEUP) && is_out) {
		pr_warn("%s: output GPIO%d unable to wakeup\n", __func__, gpio);
		return -EINVAL;
	}

	return 0;
}

/* Validate an MFP pin number; returns the GPIO number or -1 if invalid. */
static inline int __mfp_validate(int mfp)
{
	int gpio = mfp_to_gpio(mfp);

	if ((mfp > MFP_PIN_GPIO127) || !gpio_desc[gpio].valid) {
		pr_warn("%s: GPIO%d is invalid pin\n", __func__, gpio);
		return -1;
	}

	return gpio;
}

/*
 * Apply an array of 'num' MFP config words.  Invalid pins are skipped
 * with a warning; each valid pin is configured with IRQs disabled and
 * its config word remembered for later wakeup/LPM queries.
 */
void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num)
{
	unsigned long flags;
	unsigned long *c;
	int i, gpio;

	for (i = 0, c = mfp_cfgs; i < num; i++, c++) {

		gpio = __mfp_validate(MFP_PIN(*c));
		if (gpio < 0)
			continue;

		local_irq_save(flags);

		gpio_desc[gpio].config = *c;
		__mfp_config_gpio(gpio, *c);

		local_irq_restore(flags);
	}
}

/* Change only the low-power-state bits of an already-configured pin. */
void pxa2xx_mfp_set_lpm(int mfp, unsigned long lpm)
{
	unsigned long flags, c;
	int gpio;

	gpio = __mfp_validate(mfp);
	if (gpio < 0)
		return;

	local_irq_save(flags);
	c = gpio_desc[gpio].config;
	c = (c & ~MFP_LPM_STATE_MASK) | lpm;
	__mfp_config_gpio(gpio, c);
	local_irq_restore(flags);
}

/*
 * Enable/disable wakeup on a GPIO, honouring whether its wakeup bit
 * lives in PKWR (keypad pins used as plain GPIO) or in PWER (possibly
 * via a multiplexed WEMUX field).  Returns 0, -EINVAL for a pin that
 * cannot wake, or -EBUSY if the shared mux field is already claimed.
 */
int gpio_set_wake(unsigned int gpio, unsigned int on)
{
	struct gpio_desc *d;
	unsigned long c, mux_taken;

	if (gpio > mfp_to_gpio(MFP_PIN_GPIO127))
		return -EINVAL;

	d = &gpio_desc[gpio];
	c = d->config;

	if (!d->valid)
		return -EINVAL;

	/* Allow keypad GPIOs to wakeup system when
	 * configured as generic GPIOs.
	 */
	if (d->keypad_gpio && (MFP_AF(d->config) == 0) &&
	    (d->config & MFP_LPM_CAN_WAKEUP)) {
		if (on)
			PKWR |= d->mask;
		else
			PKWR &= ~d->mask;
		return 0;
	}

	/* another GPIO already owns this pin's WEMUX field? */
	mux_taken = (PWER & d->mux_mask) & (~d->mask);
	if (on && mux_taken)
		return -EBUSY;

	if (d->can_wakeup && (c & MFP_LPM_CAN_WAKEUP)) {
		if (on) {
			PWER = (PWER & ~d->mux_mask) | d->mask;

			if (c & MFP_LPM_EDGE_RISE)
				PRER |= d->mask;
			else
				PRER &= ~d->mask;

			if (c & MFP_LPM_EDGE_FALL)
				PFER |= d->mask;
			else
				PFER &= ~d->mask;
		} else {
			PWER &= ~d->mask;
			PRER &= ~d->mask;
			PFER &= ~d->mask;
		}
	}
	return 0;
}

#ifdef CONFIG_PXA25x
/* Populate gpio_desc[] for PXA25x: GPIO0..84 valid, 0..15 can wake. */
static void __init pxa25x_mfp_init(void)
{
	int i;

	/* running before pxa_gpio_probe() */
	pxa_last_gpio = 84;
	for (i = 0; i <= pxa_last_gpio; i++)
		gpio_desc[i].valid = 1;

	for (i = 0; i <= 15; i++) {
		gpio_desc[i].can_wakeup = 1;
		gpio_desc[i].mask = GPIO_bit(i);
	}

	/* PXA26x has additional 4 GPIOs (86/87/88/89) which has the
	 * direction bit inverted in GPDR2. See PXA26x DM 4.1.1.
	 */
	for (i = 86; i <= pxa_last_gpio; i++)
		gpio_desc[i].dir_inverted = 1;
}
#else
static inline void pxa25x_mfp_init(void) {}
#endif /* CONFIG_PXA25x */

#ifdef CONFIG_PXA27x
/* GPIOs whose wakeup-enable bits live in PKWR, in PKWR bit order */
static int pxa27x_pkwr_gpio[] = {
	13, 16, 17, 34, 36, 37, 38, 39, 90, 91, 93, 94,
	95, 96, 97, 98, 99, 100, 101, 102
};

/*
 * Enable/disable PKWR wakeup for all keypad pins that are configured
 * for keypad use (AF != 0) and flagged MFP_LPM_CAN_WAKEUP.
 */
int keypad_set_wake(unsigned int on)
{
	unsigned int i, gpio, mask = 0;
	struct gpio_desc *d;

	for (i = 0; i < ARRAY_SIZE(pxa27x_pkwr_gpio); i++) {

		gpio = pxa27x_pkwr_gpio[i];
		d = &gpio_desc[gpio];

		/* skip if configured as generic GPIO */
		if (MFP_AF(d->config) == 0)
			continue;

		if (d->config & MFP_LPM_CAN_WAKEUP)
			mask |= gpio_desc[gpio].mask;
	}

	if (on)
		PKWR |= mask;
	else
		PKWR &= ~mask;
	return 0;
}

/* Encodings of the shared PWER WEMUX2/WEMUX3 fields */
#define PWER_WEMUX2_GPIO38	(1 << 16)
#define PWER_WEMUX2_GPIO53	(2 << 16)
#define PWER_WEMUX2_GPIO40	(3 << 16)
#define PWER_WEMUX2_GPIO36	(4 << 16)
#define PWER_WEMUX2_MASK	(7 << 16)
#define PWER_WEMUX3_GPIO31	(1 << 19)
#define PWER_WEMUX3_GPIO113	(2 << 19)
#define PWER_WEMUX3_MASK	(3 << 19)

#define INIT_GPIO_DESC_MUXED(mux, gpio)				\
do {								\
	gpio_desc[(gpio)].can_wakeup = 1;			\
	gpio_desc[(gpio)].mask = PWER_ ## mux ## _GPIO ##gpio;	\
	gpio_desc[(gpio)].mux_mask = PWER_ ## mux ## _MASK;	\
} while (0)

/* Populate gpio_desc[] for PXA27x: validity, keypad and mux wakeups. */
static void __init pxa27x_mfp_init(void)
{
	int i, gpio;

	pxa_last_gpio = 120;	/* running before pxa_gpio_probe() */
	for (i = 0; i <= pxa_last_gpio; i++) {
		/* skip GPIO2, 5, 6, 7, 8, they are not
		 * valid pins allow configuration
		 */
		if (i == 2 || i == 5 || i == 6 || i == 7 || i == 8)
			continue;

		gpio_desc[i].valid = 1;
	}

	/* Keypad GPIOs */
	for (i = 0; i < ARRAY_SIZE(pxa27x_pkwr_gpio); i++) {
		gpio = pxa27x_pkwr_gpio[i];
		gpio_desc[gpio].can_wakeup = 1;
		gpio_desc[gpio].keypad_gpio = 1;
		gpio_desc[gpio].mask = 1 << i;
	}

	/* Overwrite GPIO13 as a PWER wakeup source */
	for (i = 0; i <= 15; i++) {
		/* skip GPIO2, 5, 6, 7, 8 */
		if (GPIO_bit(i) & 0x1e4)
			continue;

		gpio_desc[i].can_wakeup = 1;
		gpio_desc[i].mask = GPIO_bit(i);
	}

	gpio_desc[35].can_wakeup = 1;
	gpio_desc[35].mask = PWER_WE35;

	INIT_GPIO_DESC_MUXED(WEMUX3, 31);
	INIT_GPIO_DESC_MUXED(WEMUX3, 113);
	INIT_GPIO_DESC_MUXED(WEMUX2, 38);
	INIT_GPIO_DESC_MUXED(WEMUX2, 53);
	INIT_GPIO_DESC_MUXED(WEMUX2, 40);
	INIT_GPIO_DESC_MUXED(WEMUX2, 36);
}
#else
static inline void pxa27x_mfp_init(void) {}
#endif /* CONFIG_PXA27x */

#ifdef CONFIG_PM
static unsigned long saved_gafr[2][4];
static unsigned long saved_gpdr[4];
static unsigned long saved_gplr[4];
static unsigned long saved_pgsr[4];

/*
 * syscore suspend hook: save GAFR/GPDR/GPLR/PGSR, then put every pin
 * into its configured low-power level (via GPSR/GPCR) and low-power
 * direction (gpdr_lpm plus MFP_LPM_KEEP_OUTPUT overrides).
 */
static int pxa2xx_mfp_suspend(void)
{
	int i;

	/* set corresponding PGSR bit of those marked MFP_LPM_KEEP_OUTPUT */
	for (i = 0; i < pxa_last_gpio; i++) {
		if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
		    (GPDR(i) & GPIO_bit(i))) {
			if (GPLR(i) & GPIO_bit(i))
				PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
			else
				PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
		}
	}

	for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {

		saved_gafr[0][i] = GAFR_L(i);
		saved_gafr[1][i] = GAFR_U(i);
		saved_gpdr[i] = GPDR(i * 32);
		saved_gplr[i] = GPLR(i * 32);
		saved_pgsr[i] = PGSR(i);

		/* drive output levels to the programmed sleep state */
		GPSR(i * 32) = PGSR(i);
		GPCR(i * 32) = ~PGSR(i);
	}

	/* set GPDR bits taking into account MFP_LPM_KEEP_OUTPUT */
	for (i = 0; i < pxa_last_gpio; i++) {
		if ((gpdr_lpm[gpio_to_bank(i)] & GPIO_bit(i)) ||
		    ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
		     (saved_gpdr[gpio_to_bank(i)] & GPIO_bit(i))))
			GPDR(i) |= GPIO_bit(i);
		else
			GPDR(i) &= ~GPIO_bit(i);
	}

	return 0;
}

/* syscore resume hook: restore the registers saved at suspend. */
static void pxa2xx_mfp_resume(void)
{
	int i;

	for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
		GAFR_L(i) = saved_gafr[0][i];
		GAFR_U(i) = saved_gafr[1][i];
		GPSR(i * 32) = saved_gplr[i];
		GPCR(i * 32) = ~saved_gplr[i];
		GPDR(i * 32) = saved_gpdr[i];
		PGSR(i) = saved_pgsr[i];
	}
	/* clear sleep/peripheral hold so GPIO outputs take effect again */
	PSSR = PSSR_RDH | PSSR_PH;
}
#else
#define pxa2xx_mfp_suspend	NULL
#define pxa2xx_mfp_resume	NULL
#endif

struct syscore_ops pxa2xx_mfp_syscore_ops = {
	.suspend	= pxa2xx_mfp_suspend,
	.resume		= pxa2xx_mfp_resume,
};

/* One-time init: per-SoC gpio_desc setup and gpdr_lpm[] seeding. */
static int __init pxa2xx_mfp_init(void)
{
	int i;

	if (!cpu_is_pxa2xx())
		return 0;

	if (cpu_is_pxa25x())
		pxa25x_mfp_init();

	if (cpu_is_pxa27x())
		pxa27x_mfp_init();

	/* clear RDH bit to enable GPIO receivers after reset/sleep exit */
	PSSR = PSSR_RDH;

	/* initialize gafr_run[], pgsr_lpm[] from existing values */
	for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++)
		gpdr_lpm[i] = GPDR(i * 32);

	return 0;
}
postcore_initcall(pxa2xx_mfp_init);
linux-master
arch/arm/mach-pxa/mfp-pxa2xx.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/io.h> #include <asm/proc-fns.h> #include <asm/system_misc.h> #include "regs-ost.h" #include "reset.h" #include "smemc.h" #include "generic.h" static void do_hw_reset(void); static int reset_gpio = -1; int init_gpio_reset(int gpio, int output, int level) { int rc; rc = gpio_request(gpio, "reset generator"); if (rc) { printk(KERN_ERR "Can't request reset_gpio\n"); goto out; } if (output) rc = gpio_direction_output(gpio, level); else rc = gpio_direction_input(gpio); if (rc) { printk(KERN_ERR "Can't configure reset_gpio\n"); gpio_free(gpio); goto out; } out: if (!rc) reset_gpio = gpio; return rc; } /* * Trigger GPIO reset. * This covers various types of logic connecting gpio pin * to RESET pins (nRESET or GPIO_RESET): */ static void do_gpio_reset(void) { BUG_ON(reset_gpio == -1); /* drive it low */ gpio_direction_output(reset_gpio, 0); mdelay(2); /* rising edge or drive high */ gpio_set_value(reset_gpio, 1); mdelay(2); /* falling edge */ gpio_set_value(reset_gpio, 0); /* give it some time */ mdelay(10); WARN_ON(1); /* fallback */ do_hw_reset(); } static void do_hw_reset(void) { /* Initialize the watchdog and let it fire */ writel_relaxed(OWER_WME, OWER); writel_relaxed(OSSR_M3, OSSR); /* ... in 100 ms */ writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); /* * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71) * we put SDRAM into self-refresh to prevent that */ while (1) writel_relaxed(MDREFR_SLFRSH, MDREFR); } void pxa_restart(enum reboot_mode mode, const char *cmd) { local_irq_disable(); local_fiq_disable(); clear_reset_status(RESET_STATUS_ALL); switch (mode) { case REBOOT_SOFT: /* Jump into ROM at address 0 */ soft_restart(0); break; case REBOOT_GPIO: do_gpio_reset(); break; case REBOOT_HARD: default: do_hw_reset(); break; } }
linux-master
arch/arm/mach-pxa/reset.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-pxa/mfp.c * * PXA3xx Multi-Function Pin Support * * Copyright (C) 2007 Marvell Internation Ltd. * * 2007-08-21: eric miao <[email protected]> * initial version */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include "mfp-pxa3xx.h" #include "pxa3xx-regs.h" #ifdef CONFIG_PM /* * Configure the MFPs appropriately for suspend/resume. * FIXME: this should probably depend on which system state we're * entering - for instance, we might not want to place MFP pins in * a pull-down mode if they're an active low chip select, and we're * just entering standby. */ static int pxa3xx_mfp_suspend(void) { mfp_config_lpm(); return 0; } static void pxa3xx_mfp_resume(void) { mfp_config_run(); /* clear RDH bit when MFP settings are restored * * NOTE: the last 3 bits DxS are write-1-to-clear so carefully * preserve them here in case they will be referenced later */ ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); } #else #define pxa3xx_mfp_suspend NULL #define pxa3xx_mfp_resume NULL #endif struct syscore_ops pxa3xx_mfp_syscore_ops = { .suspend = pxa3xx_mfp_suspend, .resume = pxa3xx_mfp_resume, };
linux-master
arch/arm/mach-pxa/mfp-pxa3xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/pxa320.c
 *
 * Code specific to PXA320
 *
 * Copyright (C) 2007 Marvell Internation Ltd.
 *
 * 2007-08-21: eric miao <[email protected]>
 *             initial version
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/pxa/cpu.h>

#include "pxa320.h"

#include "generic.h"
#include "devices.h"

/*
 * PXA320 pin-name -> MFPR register-offset table, consumed by the
 * generic PXA3xx MFP code via mfp_init_addr().  Offsets are relative
 * to MFPR_BASE.
 */
static struct mfp_addr_map pxa320_mfp_addr_map[] __initdata = {

	/* GPIO pin blocks (contiguous register ranges) */
	MFP_ADDR_X(GPIO0,  GPIO4,   0x0124),
	MFP_ADDR_X(GPIO5,  GPIO9,   0x028C),
	MFP_ADDR(GPIO10, 0x0458),
	MFP_ADDR_X(GPIO11, GPIO26,  0x02A0),
	MFP_ADDR_X(GPIO27, GPIO48,  0x0400),
	MFP_ADDR_X(GPIO49, GPIO62,  0x045C),
	MFP_ADDR_X(GPIO63, GPIO73,  0x04B4),
	MFP_ADDR_X(GPIO74, GPIO98,  0x04F0),
	MFP_ADDR_X(GPIO99, GPIO127, 0x0600),
	MFP_ADDR_X(GPIO0_2, GPIO5_2, 0x0674),
	MFP_ADDR_X(GPIO6_2, GPIO13_2, 0x0494),
	MFP_ADDR_X(GPIO14_2, GPIO17_2, 0x04E0),

	/* individually-mapped pins (data flash / static memory signals) */
	MFP_ADDR(nXCVREN, 0x0138),
	MFP_ADDR(DF_CLE_nOE, 0x0204),
	MFP_ADDR(DF_nADV1_ALE, 0x0208),
	MFP_ADDR(DF_SCLK_S, 0x020C),
	MFP_ADDR(DF_SCLK_E, 0x0210),
	MFP_ADDR(nBE0, 0x0214),
	MFP_ADDR(nBE1, 0x0218),
	MFP_ADDR(DF_nADV2_ALE, 0x021C),
	MFP_ADDR(DF_INT_RnB, 0x0220),
	MFP_ADDR(DF_nCS0, 0x0224),
	MFP_ADDR(DF_nCS1, 0x0228),
	MFP_ADDR(DF_nWE, 0x022C),
	MFP_ADDR(DF_nRE_nOE, 0x0230),
	MFP_ADDR(nLUA, 0x0234),
	MFP_ADDR(nLLA, 0x0238),
	MFP_ADDR(DF_ADDR0, 0x023C),
	MFP_ADDR(DF_ADDR1, 0x0240),
	MFP_ADDR(DF_ADDR2, 0x0244),
	MFP_ADDR(DF_ADDR3, 0x0248),
	MFP_ADDR(DF_IO0, 0x024C),
	MFP_ADDR(DF_IO8, 0x0250),
	MFP_ADDR(DF_IO1, 0x0254),
	MFP_ADDR(DF_IO9, 0x0258),
	MFP_ADDR(DF_IO2, 0x025C),
	MFP_ADDR(DF_IO10, 0x0260),
	MFP_ADDR(DF_IO3, 0x0264),
	MFP_ADDR(DF_IO11, 0x0268),
	MFP_ADDR(DF_IO4, 0x026C),
	MFP_ADDR(DF_IO12, 0x0270),
	MFP_ADDR(DF_IO5, 0x0274),
	MFP_ADDR(DF_IO13, 0x0278),
	MFP_ADDR(DF_IO6, 0x027C),
	MFP_ADDR(DF_IO14, 0x0280),
	MFP_ADDR(DF_IO7, 0x0284),
	MFP_ADDR(DF_IO15, 0x0288),

	MFP_ADDR_END,
};

/* Register the PXA320 MFP table with the generic MFP layer at boot. */
static int __init pxa320_init(void)
{
	if (cpu_is_pxa320()) {
		mfp_init_base(io_p2v(MFPR_BASE));
		mfp_init_addr(pxa320_mfp_addr_map);
	}

	return 0;
}

core_initcall(pxa320_init);
linux-master
arch/arm/mach-pxa/pxa320.c
/*
 * PXA250/210 Power Management Routines
 *
 * Original code for the SA11x0:
 * Copyright (c) 2001 Cliff Brake <[email protected]>
 *
 * Modified for the PXA250 by Nicolas Pitre:
 * Copyright (c) 2002 Monta Vista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include "pm.h"

/* SoC-specific suspend callbacks; filled in by pxa25x/pxa27x/pxa3xx code */
struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;

/* scratch area for registers saved across deep sleep, sized at init */
static unsigned long *sleep_save;

/*
 * Enter the given suspend state via the SoC hooks.  For states other
 * than standby the register set is saved first and verified against a
 * checksum after wakeup; a mismatch means memory was corrupted during
 * sleep, in which case we simply re-enter sleep forever and wait for a
 * hardware reset.
 */
int pxa_pm_enter(suspend_state_t state)
{
	unsigned long pre_sum = 0, post_sum = 0;
	bool save_regs = (state != PM_SUSPEND_STANDBY);
	int idx;

#ifdef CONFIG_IWMMXT
	/* force any iWMMXt context to ram **/
	if (elf_hwcap & HWCAP_IWMMXT)
		iwmmxt_task_disable(NULL);
#endif

	/* skip registers saving for standby */
	if (save_regs && pxa_cpu_pm_fns->save) {
		pxa_cpu_pm_fns->save(sleep_save);
		/* before sleeping, compute a checksum over the saved words
		 * NOTE(review): the last slot is excluded from the checksum —
		 * presumably reserved for something volatile such as the
		 * resume address; confirm against the pxa_cpu_pm_fns users.
		 */
		for (idx = 0; idx < pxa_cpu_pm_fns->save_count - 1; idx++)
			pre_sum += sleep_save[idx];
	}

	/* *** go zzz *** */
	pxa_cpu_pm_fns->enter(state);

	if (save_regs && pxa_cpu_pm_fns->restore) {
		/* after sleeping, validate the checksum */
		for (idx = 0; idx < pxa_cpu_pm_fns->save_count - 1; idx++)
			post_sum += sleep_save[idx];

		/* if invalid, spin in sleep until a hardware reset occurs */
		if (post_sum != pre_sum) {
			while (1)
				pxa_cpu_pm_fns->enter(state);
		}

		pxa_cpu_pm_fns->restore(sleep_save);
	}

	pr_debug("*** made it back from resume\n");

	return 0;
}

EXPORT_SYMBOL_GPL(pxa_pm_enter);

/* suspend_ops .valid: defer to the SoC hook, -EINVAL if none registered */
static int pxa_pm_valid(suspend_state_t state)
{
	return pxa_cpu_pm_fns ? pxa_cpu_pm_fns->valid(state) : -EINVAL;
}

/* suspend_ops .prepare: optional SoC pre-suspend hook */
int pxa_pm_prepare(void)
{
	if (pxa_cpu_pm_fns && pxa_cpu_pm_fns->prepare)
		return pxa_cpu_pm_fns->prepare();

	return 0;
}

/* suspend_ops .finish: optional SoC post-resume hook */
void pxa_pm_finish(void)
{
	if (pxa_cpu_pm_fns && pxa_cpu_pm_fns->finish)
		pxa_cpu_pm_fns->finish();
}

static const struct platform_suspend_ops pxa_pm_ops = {
	.valid		= pxa_pm_valid,
	.enter		= pxa_pm_enter,
	.prepare	= pxa_pm_prepare,
	.finish		= pxa_pm_finish,
};

/* Allocate the register save area and register the suspend ops. */
static int __init pxa_pm_init(void)
{
	if (!pxa_cpu_pm_fns) {
		printk(KERN_ERR "no valid pxa_cpu_pm_fns defined\n");
		return -EINVAL;
	}

	sleep_save = kmalloc_array(pxa_cpu_pm_fns->save_count,
				   sizeof(*sleep_save), GFP_KERNEL);
	if (!sleep_save)
		return -ENOMEM;

	suspend_set_ops(&pxa_pm_ops);
	return 0;
}

device_initcall(pxa_pm_init);
linux-master
arch/arm/mach-pxa/pm.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/platform_data/i2c-pxa.h> #include <linux/soc/pxa/cpu.h> #include "udc.h" #include <linux/platform_data/video-pxafb.h> #include <linux/platform_data/mmc-pxamci.h> #include "irqs.h" #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/mmp_dma.h> #include "regs-ost.h" #include "reset.h" #include "devices.h" #include "generic.h" void __init pxa_register_device(struct platform_device *dev, void *data) { int ret; dev->dev.platform_data = data; ret = platform_device_register(dev); if (ret) dev_err(&dev->dev, "unable to register device: %d\n", ret); } static struct resource pxa_resource_pmu = { .start = IRQ_PMU, .end = IRQ_PMU, .flags = IORESOURCE_IRQ, }; struct platform_device pxa_device_pmu = { .name = "xscale-pmu", .id = -1, .resource = &pxa_resource_pmu, .num_resources = 1, }; static struct resource pxamci_resources[] = { [0] = { .start = 0x41100000, .end = 0x41100fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC, .end = IRQ_MMC, .flags = IORESOURCE_IRQ, }, }; static u64 pxamci_dmamask = 0xffffffffUL; struct platform_device pxa_device_mci = { .name = "pxa2xx-mci", .id = 0, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxamci_resources), .resource = pxamci_resources, }; void __init pxa_set_mci_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa_device_mci, info); } static struct pxa2xx_udc_mach_info pxa_udc_info = { .gpio_pullup = -1, }; static struct resource pxa2xx_udc_resources[] = { [0] = { .start = 0x40600000, .end = 0x4060ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USB, .end = IRQ_USB, .flags = IORESOURCE_IRQ, }, }; static u64 udc_dma_mask = 
~(u32)0; struct platform_device pxa25x_device_udc = { .name = "pxa25x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; struct platform_device pxa27x_device_udc = { .name = "pxa27x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; static struct resource pxafb_resources[] = { [0] = { .start = 0x44000000, .end = 0x4400ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_LCD, .end = IRQ_LCD, .flags = IORESOURCE_IRQ, }, }; static u64 fb_dma_mask = ~(u64)0; struct platform_device pxa_device_fb = { .name = "pxa2xx-fb", .id = -1, .dev = { .dma_mask = &fb_dma_mask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxafb_resources), .resource = pxafb_resources, }; void __init pxa_set_fb_info(struct device *parent, struct pxafb_mach_info *info) { pxa_device_fb.dev.parent = parent; pxa_register_device(&pxa_device_fb, info); } static struct resource pxa_resource_ffuart[] = { { .start = 0x40100000, .end = 0x40100023, .flags = IORESOURCE_MEM, }, { .start = IRQ_FFUART, .end = IRQ_FFUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_ffuart = { .name = "pxa2xx-uart", .id = 0, .resource = pxa_resource_ffuart, .num_resources = ARRAY_SIZE(pxa_resource_ffuart), }; void __init pxa_set_ffuart_info(void *info) { pxa_register_device(&pxa_device_ffuart, info); } static struct resource pxa_resource_btuart[] = { { .start = 0x40200000, .end = 0x40200023, .flags = IORESOURCE_MEM, }, { .start = IRQ_BTUART, .end = IRQ_BTUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_btuart = { .name = "pxa2xx-uart", .id = 1, .resource = pxa_resource_btuart, .num_resources = ARRAY_SIZE(pxa_resource_btuart), }; void __init pxa_set_btuart_info(void *info) { pxa_register_device(&pxa_device_btuart, info); } static 
struct resource pxa_resource_stuart[] = { { .start = 0x40700000, .end = 0x40700023, .flags = IORESOURCE_MEM, }, { .start = IRQ_STUART, .end = IRQ_STUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_stuart = { .name = "pxa2xx-uart", .id = 2, .resource = pxa_resource_stuart, .num_resources = ARRAY_SIZE(pxa_resource_stuart), }; void __init pxa_set_stuart_info(void *info) { pxa_register_device(&pxa_device_stuart, info); } static struct resource pxa_resource_hwuart[] = { { .start = 0x41600000, .end = 0x4160002F, .flags = IORESOURCE_MEM, }, { .start = IRQ_HWUART, .end = IRQ_HWUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_hwuart = { .name = "pxa2xx-uart", .id = 3, .resource = pxa_resource_hwuart, .num_resources = ARRAY_SIZE(pxa_resource_hwuart), }; void __init pxa_set_hwuart_info(void *info) { if (cpu_is_pxa255()) pxa_register_device(&pxa_device_hwuart, info); else pr_info("UART: Ignoring attempt to register HWUART on non-PXA255 hardware"); } static struct resource pxai2c_resources[] = { { .start = 0x40301680, .end = 0x403016a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2C, .end = IRQ_I2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2c = { .name = "pxa2xx-i2c", .id = 0, .resource = pxai2c_resources, .num_resources = ARRAY_SIZE(pxai2c_resources), }; void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info) { pxa_register_device(&pxa_device_i2c, info); } #ifdef CONFIG_PXA27x static struct resource pxa27x_resources_i2c_power[] = { { .start = 0x40f00180, .end = 0x40f001a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_PWRI2C, .end = IRQ_PWRI2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_i2c_power = { .name = "pxa2xx-i2c", .id = 1, .resource = pxa27x_resources_i2c_power, .num_resources = ARRAY_SIZE(pxa27x_resources_i2c_power), }; #endif static struct resource pxai2s_resources[] = { { .start = 0x40400000, .end = 0x40400083, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2S, .end = 
IRQ_I2S, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2s = { .name = "pxa2xx-i2s", .id = -1, .resource = pxai2s_resources, .num_resources = ARRAY_SIZE(pxai2s_resources), }; struct platform_device pxa_device_asoc_ssp1 = { .name = "pxa-ssp-dai", .id = 0, }; struct platform_device pxa_device_asoc_ssp2= { .name = "pxa-ssp-dai", .id = 1, }; struct platform_device pxa_device_asoc_ssp3 = { .name = "pxa-ssp-dai", .id = 2, }; struct platform_device pxa_device_asoc_ssp4 = { .name = "pxa-ssp-dai", .id = 3, }; struct platform_device pxa_device_asoc_platform = { .name = "pxa-pcm-audio", .id = -1, }; static struct resource pxa_rtc_resources[] = { [0] = { .start = 0x40900000, .end = 0x40900000 + 0x3b, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_RTC1Hz, .end = IRQ_RTC1Hz, .name = "rtc 1Hz", .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_RTCAlrm, .end = IRQ_RTCAlrm, .name = "rtc alarm", .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_rtc = { .name = "pxa-rtc", .id = -1, .num_resources = ARRAY_SIZE(pxa_rtc_resources), .resource = pxa_rtc_resources, }; struct platform_device sa1100_device_rtc = { .name = "sa1100-rtc", .id = -1, .num_resources = ARRAY_SIZE(pxa_rtc_resources), .resource = pxa_rtc_resources, }; #ifdef CONFIG_PXA25x static struct resource pxa25x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa25x_device_pwm0 = { .name = "pxa25x-pwm", .id = 0, .resource = pxa25x_resource_pwm0, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm0), }; static struct resource pxa25x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa25x_device_pwm1 = { .name = "pxa25x-pwm", .id = 1, .resource = pxa25x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm1), }; static u64 pxa25x_ssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_ssp[] = { [0] = { .start = 0x41000000, .end = 
0x4100001f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa25x_device_ssp = { .name = "pxa25x-ssp", .id = 0, .dev = { .dma_mask = &pxa25x_ssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_ssp, .num_resources = ARRAY_SIZE(pxa25x_resource_ssp), }; static u64 pxa25x_nssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_nssp[] = { [0] = { .start = 0x41400000, .end = 0x4140002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NSSP, .end = IRQ_NSSP, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa25x_device_nssp = { .name = "pxa25x-nssp", .id = 1, .dev = { .dma_mask = &pxa25x_nssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_nssp, .num_resources = ARRAY_SIZE(pxa25x_resource_nssp), }; static u64 pxa25x_assp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_assp[] = { [0] = { .start = 0x41500000, .end = 0x4150002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_ASSP, .end = IRQ_ASSP, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa25x_device_assp = { /* ASSP is basically equivalent to NSSP */ .name = "pxa25x-nssp", .id = 2, .dev = { .dma_mask = &pxa25x_assp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_assp, .num_resources = ARRAY_SIZE(pxa25x_resource_assp), }; #endif /* CONFIG_PXA25x */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) static u64 pxa27x_ohci_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ohci[] = { [0] = { .start = 0x4C000000, .end = 0x4C00ff6f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USBH1, .end = IRQ_USBH1, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_ohci = { .name = "pxa27x-ohci", .id = -1, .dev = { .dma_mask = &pxa27x_ohci_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pxa27x_resource_ohci), .resource = pxa27x_resource_ohci, 
}; void __init pxa_set_ohci_info(struct pxaohci_platform_data *info) { pxa_register_device(&pxa27x_device_ohci, info); } #endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) static u64 pxa27x_ssp1_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp1[] = { [0] = { .start = 0x41000000, .end = 0x4100003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_ssp1 = { .name = "pxa27x-ssp", .id = 0, .dev = { .dma_mask = &pxa27x_ssp1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp1, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp1), }; static u64 pxa27x_ssp2_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp2[] = { [0] = { .start = 0x41700000, .end = 0x4170003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP2, .end = IRQ_SSP2, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_ssp2 = { .name = "pxa27x-ssp", .id = 1, .dev = { .dma_mask = &pxa27x_ssp2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp2, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp2), }; static u64 pxa27x_ssp3_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp3[] = { [0] = { .start = 0x41900000, .end = 0x4190003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP3, .end = IRQ_SSP3, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_ssp3 = { .name = "pxa27x-ssp", .id = 2, .dev = { .dma_mask = &pxa27x_ssp3_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp3, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp3), }; static struct resource pxa27x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm0 = { .name = "pxa27x-pwm", .id = 0, .resource = pxa27x_resource_pwm0, .num_resources = 
ARRAY_SIZE(pxa27x_resource_pwm0), }; static struct resource pxa27x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm1 = { .name = "pxa27x-pwm", .id = 1, .resource = pxa27x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa27x_resource_pwm1), }; #endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ struct resource pxa_resource_gpio[] = { { .start = 0x40e00000, .end = 0x40e0ffff, .flags = IORESOURCE_MEM, }, { .start = IRQ_GPIO0, .end = IRQ_GPIO0, .name = "gpio0", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO1, .end = IRQ_GPIO1, .name = "gpio1", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO_2_x, .end = IRQ_GPIO_2_x, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa25x_device_gpio = { .name = "pxa25x-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; struct platform_device pxa27x_device_gpio = { .name = "pxa27x-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. 
* See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info) { struct platform_device *pd; pd = platform_device_alloc("pxa2xx-spi", id); if (pd == NULL) { printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n", id); return; } pd->dev.platform_data = info; platform_device_add(pd); } static struct resource pxa_dma_resource[] = { [0] = { .start = 0x40000000, .end = 0x4000ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_DMA, .end = IRQ_DMA, .flags = IORESOURCE_IRQ, }, }; static u64 pxadma_dmamask = 0xffffffffUL; static struct platform_device pxa2xx_pxa_dma = { .name = "pxa-dma", .id = 0, .dev = { .dma_mask = &pxadma_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa_dma_resource), .resource = pxa_dma_resource, }; void __init pxa2xx_set_dmac_info(struct mmp_dma_platdata *dma_pdata) { pxa_register_device(&pxa2xx_pxa_dma, dma_pdata); } void __init pxa_register_wdt(unsigned int reset_status) { struct resource res = DEFINE_RES_MEM(OST_PHYS, OST_LEN); reset_status &= RESET_STATUS_WATCHDOG; platform_device_register_resndata(NULL, "sa1100_wdt", -1, &res, 1, &reset_status, sizeof(reset_status)); }
linux-master
arch/arm/mach-pxa/devices.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/generic.c
 *
 * Author:	Nicolas Pitre
 * Created:	Jun 15, 2001
 * Copyright:	MontaVista Software Inc.
 *
 * Code common to all PXA machines.
 *
 * Since this file should be linked before any other machine specific file,
 * the __initcall() here will be executed first. This serves as default
 * initialization stuff for PXA machines which can be overridden later if
 * need be.
 */
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/soc/pxa/cpu.h>
#include <linux/soc/pxa/smemc.h>
#include <linux/clk/pxa.h>

#include <asm/mach/map.h>
#include <asm/mach-types.h>

#include "addr-map.h"
#include "irqs.h"
#include "reset.h"
#include "smemc.h"
#include "pxa3xx-regs.h"

#include "generic.h"
#include <clocksource/pxa.h>

/*
 * Clear the latched reset-cause bits given in @mask.
 *
 * PXA2xx parts route this through pxa2xx_clear_reset_status() (RCSR);
 * on PXA3xx the RESET_STATUS_* bits map 1:1 onto the ARSR register, so
 * writing the mask there clears them directly (write-1-to-clear).
 */
void clear_reset_status(unsigned int mask)
{
	if (cpu_is_pxa2xx())
		pxa2xx_clear_reset_status(mask);
	else {
		/* RESET_STATUS_* has a 1:1 mapping with ARSR */
		ARSR = mask;
	}
}

/*
 * For non device-tree builds, keep legacy timer init
 *
 * Initializes the per-SoC clock tree at the SoC's clock-controller base
 * (0x41300000 for PXA25x/27x, 0x41340000/0x41350000 for PXA3xx), then
 * registers the OS Timer (OST) at 0x40a00000 as the clocksource/event.
 */
void __init pxa_timer_init(void)
{
	if (cpu_is_pxa25x())
		pxa25x_clocks_init(io_p2v(0x41300000));
	if (cpu_is_pxa27x())
		pxa27x_clocks_init(io_p2v(0x41300000));
	if (cpu_is_pxa3xx())
		pxa3xx_clocks_init(io_p2v(0x41340000), io_p2v(0x41350000));
	pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000));
}

/*
 * Program the static memory controller timing registers (MCMEM/MCATT/MCIO)
 * for PCMCIA socket @sock. Used by the PCMCIA socket drivers.
 */
void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio)
{
	__raw_writel(mcmem, MCMEM(sock));
	__raw_writel(mcatt, MCATT(sock));
	__raw_writel(mcio, MCIO(sock));
}
EXPORT_SYMBOL_GPL(pxa_smemc_set_pcmcia_timing);

/*
 * Tell the static memory controller how many PCMCIA sockets (0..2)
 * are present, via the MECR CIT/NOS bits.
 */
void pxa_smemc_set_pcmcia_socket(int nr)
{
	switch (nr) {
	case 0:
		__raw_writel(0, MECR);
		break;
	case 1:
		/*
		 * We have at least one socket, so set MECR:CIT
		 * (Card Is There)
		 */
		__raw_writel(MECR_CIT, MECR);
		break;
	case 2:
		/* Set CIT and MECR:NOS (Number Of Sockets) */
		__raw_writel(MECR_CIT | MECR_NOS, MECR);
		break;
	}
}
EXPORT_SYMBOL_GPL(pxa_smemc_set_pcmcia_socket);

/* Return the mapped address of the SDRAM refresh config register. */
void __iomem *pxa_smemc_get_mdrefr(void)
{
	return MDREFR;
}

/*
 * Intel PXA2xx internal register mapping.
 *
 * Note: virtual 0xfffe0000-0xffffffff is reserved for the vector table
 * and cache flush area.
 */
static struct map_desc common_io_desc[] __initdata = {
 	{	/* Devs */
		.virtual	= (unsigned long)PERIPH_VIRT,
		.pfn		= __phys_to_pfn(PERIPH_PHYS),
		.length		= PERIPH_SIZE,
		.type		= MT_DEVICE
	}
};

/*
 * Create the static peripheral I/O mapping shared by all PXA machines;
 * SoC-specific map_io hooks (pxa27x_map_io() etc.) call this first and
 * then add their own entries.
 */
void __init pxa_map_io(void)
{
	debug_ll_io_init();
	iotable_init(ARRAY_AND_SIZE(common_io_desc));
}
linux-master
arch/arm/mach-pxa/generic.c
// SPDX-License-Identifier: GPL-2.0-only /* * Battery and Power Management code for the Sharp SL-Cxx00 * * Copyright (c) 2005 Richard Purdie */ #include <linux/module.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/apm-emulation.h> #include <linux/spi/corgi_lcd.h> #include <asm/irq.h> #include <asm/mach-types.h> #include "spitz.h" #include "pxa27x.h" #include "sharpsl_pm.h" #include "generic.h" #define SHARPSL_CHARGE_ON_VOLT 0x99 /* 2.9V */ #define SHARPSL_CHARGE_ON_TEMP 0xe0 /* 2.9V */ #define SHARPSL_CHARGE_ON_ACIN_HIGH 0x9b /* 6V */ #define SHARPSL_CHARGE_ON_ACIN_LOW 0x34 /* 2V */ #define SHARPSL_FATAL_ACIN_VOLT 182 /* 3.45V */ #define SHARPSL_FATAL_NOACIN_VOLT 170 /* 3.40V */ static int spitz_last_ac_status; static struct gpio spitz_charger_gpios[] = { { SPITZ_GPIO_KEY_INT, GPIOF_IN, "Keyboard Interrupt" }, { SPITZ_GPIO_SYNC, GPIOF_IN, "Sync" }, { SPITZ_GPIO_AC_IN, GPIOF_IN, "Charger Detection" }, { SPITZ_GPIO_ADC_TEMP_ON, GPIOF_OUT_INIT_LOW, "ADC Temp On" }, { SPITZ_GPIO_JK_B, GPIOF_OUT_INIT_LOW, "JK B" }, { SPITZ_GPIO_CHRG_ON, GPIOF_OUT_INIT_LOW, "Charger On" }, }; static void spitz_charger_init(void) { gpio_request_array(ARRAY_AND_SIZE(spitz_charger_gpios)); } static void spitz_measure_temp(int on) { gpio_set_value(SPITZ_GPIO_ADC_TEMP_ON, on); } static void spitz_charge(int on) { if (on) { if (sharpsl_pm.flags & SHARPSL_SUSPENDED) { gpio_set_value(SPITZ_GPIO_JK_B, 1); gpio_set_value(SPITZ_GPIO_CHRG_ON, 0); } else { gpio_set_value(SPITZ_GPIO_JK_B, 0); gpio_set_value(SPITZ_GPIO_CHRG_ON, 0); } } else { gpio_set_value(SPITZ_GPIO_JK_B, 0); gpio_set_value(SPITZ_GPIO_CHRG_ON, 1); } } static void spitz_discharge(int on) { gpio_set_value(SPITZ_GPIO_JK_A, on); } /* HACK - For unknown reasons, accurate voltage readings are only made with a load on the power bus which the green led on 
spitz provides */ static void spitz_discharge1(int on) { gpio_set_value(SPITZ_GPIO_LED_GREEN, on); } static unsigned long gpio18_config = GPIO18_GPIO; static void spitz_presuspend(void) { spitz_last_ac_status = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN); /* GPIO Sleep Register */ PGSR0 = 0x00144018; PGSR1 = 0x00EF0000; if (machine_is_akita()) { PGSR2 = 0x2121C000; PGSR3 = 0x00600400; } else { PGSR2 = 0x0121C000; PGSR3 = 0x00600000; } PGSR0 &= ~SPITZ_GPIO_G0_STROBE_BIT; PGSR1 &= ~SPITZ_GPIO_G1_STROBE_BIT; PGSR2 &= ~SPITZ_GPIO_G2_STROBE_BIT; PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT; PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0); pxa2xx_mfp_config(&gpio18_config, 1); gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown"); gpio_free(18); PRER = GPIO_bit(SPITZ_GPIO_KEY_INT); PFER = GPIO_bit(SPITZ_GPIO_KEY_INT) | GPIO_bit(SPITZ_GPIO_RESET); PWER = GPIO_bit(SPITZ_GPIO_KEY_INT) | GPIO_bit(SPITZ_GPIO_RESET) | PWER_RTC; PKWR = GPIO_bit(SPITZ_GPIO_SYNC) | GPIO_bit(SPITZ_GPIO_KEY_INT) | GPIO_bit(SPITZ_GPIO_RESET); PKSR = 0xffffffff; /* clear */ /* nRESET_OUT Disable */ PSLR |= PSLR_SL_ROD; /* Stop 3.6MHz and drive HIGH to PCMCIA and CS */ PCFR = PCFR_GPR_EN | PCFR_OPDE; } static void spitz_postsuspend(void) { } static int spitz_should_wakeup(unsigned int resume_on_alarm) { int is_resume = 0; int acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN); if (spitz_last_ac_status != acin) { if (acin) { /* charge on */ sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG; dev_dbg(sharpsl_pm.dev, "AC Inserted\n"); } else { /* charge off */ dev_dbg(sharpsl_pm.dev, "AC Removed\n"); sharpsl_pm_led(SHARPSL_LED_OFF); sharpsl_pm.machinfo->charge(0); sharpsl_pm.charge_mode = CHRG_OFF; } spitz_last_ac_status = acin; /* Return to suspend as this must be what we were woken for */ return 0; } if (PEDR & GPIO_bit(SPITZ_GPIO_KEY_INT)) is_resume |= GPIO_bit(SPITZ_GPIO_KEY_INT); if (PKSR & GPIO_bit(SPITZ_GPIO_SYNC)) is_resume |= GPIO_bit(SPITZ_GPIO_SYNC); if (resume_on_alarm && (PEDR & PWER_RTC)) 
is_resume |= PWER_RTC; dev_dbg(sharpsl_pm.dev, "is_resume: %x\n", is_resume); return is_resume; } static bool spitz_charger_wakeup(void) { return !gpio_get_value(SPITZ_GPIO_KEY_INT) || gpio_get_value(SPITZ_GPIO_SYNC); } static unsigned long spitzpm_read_devdata(int type) { switch (type) { case SHARPSL_STATUS_ACIN: return !gpio_get_value(SPITZ_GPIO_AC_IN); case SHARPSL_STATUS_LOCK: return gpio_get_value(sharpsl_pm.machinfo->gpio_batlock); case SHARPSL_STATUS_CHRGFULL: return gpio_get_value(sharpsl_pm.machinfo->gpio_batfull); case SHARPSL_STATUS_FATAL: return gpio_get_value(sharpsl_pm.machinfo->gpio_fatal); case SHARPSL_ACIN_VOLT: return sharpsl_pm_pxa_read_max1111(MAX1111_ACIN_VOLT); case SHARPSL_BATT_TEMP: return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_TEMP); case SHARPSL_BATT_VOLT: default: return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_VOLT); } } struct sharpsl_charger_machinfo spitz_pm_machinfo = { .init = spitz_charger_init, .exit = NULL, .gpio_batlock = SPITZ_GPIO_BAT_COVER, .gpio_acin = SPITZ_GPIO_AC_IN, .gpio_batfull = SPITZ_GPIO_CHRG_FULL, .batfull_irq = 1, .gpio_fatal = SPITZ_GPIO_FATAL_BAT, .discharge = spitz_discharge, .discharge1 = spitz_discharge1, .charge = spitz_charge, .measure_temp = spitz_measure_temp, .presuspend = spitz_presuspend, .postsuspend = spitz_postsuspend, .read_devdata = spitzpm_read_devdata, .charger_wakeup = spitz_charger_wakeup, .should_wakeup = spitz_should_wakeup, #if defined(CONFIG_LCD_CORGI) .backlight_limit = corgi_lcd_limit_intensity, #endif .charge_on_volt = SHARPSL_CHARGE_ON_VOLT, .charge_on_temp = SHARPSL_CHARGE_ON_TEMP, .charge_acin_high = SHARPSL_CHARGE_ON_ACIN_HIGH, .charge_acin_low = SHARPSL_CHARGE_ON_ACIN_LOW, .fatal_acin_volt = SHARPSL_FATAL_ACIN_VOLT, .fatal_noacin_volt= SHARPSL_FATAL_NOACIN_VOLT, .bat_levels = 40, .bat_levels_noac = sharpsl_battery_levels_noac, .bat_levels_acin = sharpsl_battery_levels_acin, .status_high_acin = 188, .status_low_acin = 178, .status_high_noac = 185, .status_low_noac = 175, }; static 
struct platform_device *spitzpm_device; static int spitzpm_init(void) { int ret; if (!machine_is_spitz() && !machine_is_akita() && !machine_is_borzoi()) return -ENODEV; spitzpm_device = platform_device_alloc("sharpsl-pm", -1); if (!spitzpm_device) return -ENOMEM; spitzpm_device->dev.platform_data = &spitz_pm_machinfo; ret = platform_device_add(spitzpm_device); if (ret) platform_device_put(spitzpm_device); return ret; } static void spitzpm_exit(void) { platform_device_unregister(spitzpm_device); } module_init(spitzpm_init); module_exit(spitzpm_exit);
linux-master
arch/arm/mach-pxa/spitz_pm.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-pxa/pxa27x.c * * Author: Nicolas Pitre * Created: Nov 05, 2002 * Copyright: MontaVista Software Inc. * * Code specific to PXA27x aka Bulverde. */ #include <linux/dmaengine.h> #include <linux/dma/pxa-dma.h> #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/irqchip.h> #include <linux/suspend.h> #include <linux/platform_device.h> #include <linux/syscore_ops.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/platform_data/i2c-pxa.h> #include <linux/platform_data/mmp_dma.h> #include <linux/soc/pxa/cpu.h> #include <linux/soc/pxa/smemc.h> #include <asm/mach/map.h> #include <asm/irq.h> #include <asm/suspend.h> #include "irqs.h" #include "pxa27x.h" #include "reset.h" #include <linux/platform_data/pxa2xx_udc.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/asoc-pxa.h> #include "pm.h" #include "addr-map.h" #include "smemc.h" #include "generic.h" #include "devices.h" #include <linux/clk-provider.h> #include <linux/clkdev.h> void pxa27x_clear_otgph(void) { if (cpu_is_pxa27x() && (PSSR & PSSR_OTGPH)) PSSR |= PSSR_OTGPH; } EXPORT_SYMBOL(pxa27x_clear_otgph); static unsigned long ac97_reset_config[] = { GPIO113_AC97_nRESET_GPIO_HIGH, GPIO113_AC97_nRESET, GPIO95_AC97_nRESET_GPIO_HIGH, GPIO95_AC97_nRESET, }; void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio) { /* * This helper function is used to work around a bug in the pxa27x's * ac97 controller during a warm reset. The configuration of the * reset_gpio is changed as follows: * to_gpio == true: configured to generic output gpio and driven high * to_gpio == false: configured to ac97 controller alt fn AC97_nRESET */ if (reset_gpio == 113) pxa2xx_mfp_config(to_gpio ? &ac97_reset_config[0] : &ac97_reset_config[1], 1); if (reset_gpio == 95) pxa2xx_mfp_config(to_gpio ? 
&ac97_reset_config[2] : &ac97_reset_config[3], 1); } EXPORT_SYMBOL_GPL(pxa27x_configure_ac97reset); #ifdef CONFIG_PM #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] /* * allow platforms to override default PWRMODE setting used for PM_SUSPEND_MEM */ static unsigned int pwrmode = PWRMODE_SLEEP; /* * List of global PXA peripheral registers to preserve. * More ones like CP and general purpose register values are preserved * with the stack pointer in sleep.S. */ enum { SLEEP_SAVE_PSTR, SLEEP_SAVE_MDREFR, SLEEP_SAVE_PCFR, SLEEP_SAVE_COUNT }; static void pxa27x_cpu_pm_save(unsigned long *sleep_save) { sleep_save[SLEEP_SAVE_MDREFR] = __raw_readl(MDREFR); SAVE(PCFR); SAVE(PSTR); } static void pxa27x_cpu_pm_restore(unsigned long *sleep_save) { __raw_writel(sleep_save[SLEEP_SAVE_MDREFR], MDREFR); RESTORE(PCFR); PSSR = PSSR_RDH | PSSR_PH; RESTORE(PSTR); } static void pxa27x_cpu_pm_enter(suspend_state_t state) { extern void pxa_cpu_standby(void); #ifndef CONFIG_IWMMXT u64 acc0; #ifndef CONFIG_AS_IS_LLVM asm volatile(".arch_extension xscale\n\t" "mra %Q0, %R0, acc0" : "=r" (acc0)); #else asm volatile("mrrc p0, 0, %Q0, %R0, c0" : "=r" (acc0)); #endif #endif /* ensure voltage-change sequencer not initiated, which hangs */ PCFR &= ~PCFR_FVC; /* Clear edge-detect status register. 
*/ PEDR = 0xDF12FE1B; /* Clear reset status */ RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; switch (state) { case PM_SUSPEND_STANDBY: pxa_cpu_standby(); break; case PM_SUSPEND_MEM: cpu_suspend(pwrmode, pxa27x_finish_suspend); #ifndef CONFIG_IWMMXT #ifndef CONFIG_AS_IS_LLVM asm volatile(".arch_extension xscale\n\t" "mar acc0, %Q0, %R0" : "=r" (acc0)); #else asm volatile("mcrr p0, 0, %Q0, %R0, c0" :: "r" (acc0)); #endif #endif break; } } static int pxa27x_cpu_pm_valid(suspend_state_t state) { return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY; } static int pxa27x_cpu_pm_prepare(void) { /* set resume return address */ PSPR = __pa_symbol(cpu_resume); return 0; } static void pxa27x_cpu_pm_finish(void) { /* ensure not to come back here if it wasn't intended */ PSPR = 0; } static struct pxa_cpu_pm_fns pxa27x_cpu_pm_fns = { .save_count = SLEEP_SAVE_COUNT, .save = pxa27x_cpu_pm_save, .restore = pxa27x_cpu_pm_restore, .valid = pxa27x_cpu_pm_valid, .enter = pxa27x_cpu_pm_enter, .prepare = pxa27x_cpu_pm_prepare, .finish = pxa27x_cpu_pm_finish, }; static void __init pxa27x_init_pm(void) { pxa_cpu_pm_fns = &pxa27x_cpu_pm_fns; } #else static inline void pxa27x_init_pm(void) {} #endif /* PXA27x: Various gpios can issue wakeup events. 
This logic only * handles the simple cases, not the WEMUX2 and WEMUX3 options */ static int pxa27x_set_wake(struct irq_data *d, unsigned int on) { int gpio = pxa_irq_to_gpio(d->irq); uint32_t mask; if (gpio >= 0 && gpio < 128) return gpio_set_wake(gpio, on); if (d->irq == IRQ_KEYPAD) return keypad_set_wake(on); switch (d->irq) { case IRQ_RTCAlrm: mask = PWER_RTC; break; case IRQ_USB: mask = 1u << 26; break; default: return -EINVAL; } if (on) PWER |= mask; else PWER &=~mask; return 0; } void __init pxa27x_init_irq(void) { pxa_init_irq(34, pxa27x_set_wake); set_handle_irq(pxa27x_handle_irq); } static int __init pxa27x_dt_init_irq(struct device_node *node, struct device_node *parent) { pxa_dt_irq_init(pxa27x_set_wake); set_handle_irq(ichp_handle_irq); return 0; } IRQCHIP_DECLARE(pxa27x_intc, "marvell,pxa-intc", pxa27x_dt_init_irq); static struct map_desc pxa27x_io_desc[] __initdata = { { /* Mem Ctl */ .virtual = (unsigned long)SMEMC_VIRT, .pfn = __phys_to_pfn(PXA2XX_SMEMC_BASE), .length = SMEMC_SIZE, .type = MT_DEVICE }, { /* UNCACHED_PHYS_0 */ .virtual = UNCACHED_PHYS_0, .pfn = __phys_to_pfn(0x00000000), .length = UNCACHED_PHYS_0_SIZE, .type = MT_DEVICE }, }; void __init pxa27x_map_io(void) { pxa_map_io(); iotable_init(ARRAY_AND_SIZE(pxa27x_io_desc)); pxa27x_get_clk_frequency_khz(1); } /* * device registration specific to PXA27x. 
*/ void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info) { local_irq_disable(); PCFR |= PCFR_PI2CEN; local_irq_enable(); pxa_register_device(&pxa27x_device_i2c_power, info); } static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = { .irq_base = PXA_GPIO_TO_IRQ(0), .gpio_set_wake = gpio_set_wake, }; static struct platform_device *devices[] __initdata = { &pxa27x_device_udc, &pxa_device_pmu, &pxa_device_i2s, &pxa_device_asoc_ssp1, &pxa_device_asoc_ssp2, &pxa_device_asoc_ssp3, &pxa_device_asoc_platform, &pxa_device_rtc, &pxa27x_device_ssp1, &pxa27x_device_ssp2, &pxa27x_device_ssp3, &pxa27x_device_pwm0, &pxa27x_device_pwm1, }; static const struct dma_slave_map pxa27x_slave_map[] = { /* PXA25x, PXA27x and PXA3xx common entries */ { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) }, { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) }, { "pxa2xx-ac97", "pcm_pcm_aux_mono_out", PDMA_FILTER_PARAM(LOWEST, 10) }, { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) }, { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) }, { "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) }, { "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) }, { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) }, { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) }, { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) }, { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) }, { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) }, { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) }, { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) }, { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) }, /* PXA27x specific map */ { "pxa2xx-i2s", "rx", PDMA_FILTER_PARAM(LOWEST, 2) }, { "pxa2xx-i2s", "tx", PDMA_FILTER_PARAM(LOWEST, 3) }, { "pxa27x-camera.0", "CI_Y", PDMA_FILTER_PARAM(HIGHEST, 68) }, { "pxa27x-camera.0", "CI_U", PDMA_FILTER_PARAM(HIGHEST, 69) }, { "pxa27x-camera.0", "CI_V", 
PDMA_FILTER_PARAM(HIGHEST, 70) }, }; static struct mmp_dma_platdata pxa27x_dma_pdata = { .dma_channels = 32, .nb_requestors = 75, .slave_map = pxa27x_slave_map, .slave_map_cnt = ARRAY_SIZE(pxa27x_slave_map), }; static int __init pxa27x_init(void) { int ret = 0; if (cpu_is_pxa27x()) { pxa_register_wdt(RCSR); pxa27x_init_pm(); register_syscore_ops(&pxa_irq_syscore_ops); register_syscore_ops(&pxa2xx_mfp_syscore_ops); if (!of_have_populated_dt()) { pxa_register_device(&pxa27x_device_gpio, &pxa27x_gpio_info); pxa2xx_set_dmac_info(&pxa27x_dma_pdata); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } } return ret; } postcore_initcall(pxa27x_init);
linux-master
arch/arm/mach-pxa/pxa27x.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-pxa/pxa2xx.c * * code specific to pxa2xx * * Copyright (C) 2008 Dmitry Baryshkov */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/io.h> #include "pxa2xx-regs.h" #include "mfp-pxa25x.h" #include "generic.h" #include "reset.h" #include "smemc.h" #include <linux/soc/pxa/smemc.h> void pxa2xx_clear_reset_status(unsigned int mask) { /* RESET_STATUS_* has a 1:1 mapping with RCSR */ RCSR = mask; } #define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3) #define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3) int pxa2xx_smemc_get_sdram_rows(void) { static int sdram_rows; unsigned int drac2 = 0, drac0 = 0; u32 mdcnfg; if (sdram_rows) return sdram_rows; mdcnfg = readl_relaxed(MDCNFG); if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3)) drac2 = MDCNFG_DRAC2(mdcnfg); if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1)) drac0 = MDCNFG_DRAC0(mdcnfg); sdram_rows = 1 << (11 + max(drac0, drac2)); return sdram_rows; }
linux-master
arch/arm/mach-pxa/pxa2xx.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-pxa/pxa3xx.c * * code specific to pxa3xx aka Monahans * * Copyright (C) 2006 Marvell International Ltd. * * 2007-09-02: eric miao <[email protected]> * initial version */ #include <linux/dmaengine.h> #include <linux/dma/pxa-dma.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/gpio-pxa.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/io.h> #include <linux/of.h> #include <linux/syscore_ops.h> #include <linux/platform_data/i2c-pxa.h> #include <linux/platform_data/mmp_dma.h> #include <linux/soc/pxa/cpu.h> #include <linux/clk/pxa.h> #include <asm/mach/map.h> #include <asm/suspend.h> #include "pxa3xx-regs.h" #include "reset.h" #include <linux/platform_data/usb-ohci-pxa27x.h> #include "pm.h" #include "addr-map.h" #include "smemc.h" #include "irqs.h" #include "generic.h" #include "devices.h" #define PECR_IE(n) ((1 << ((n) * 2)) << 28) #define PECR_IS(n) ((1 << ((n) * 2)) << 29) extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)); /* * NAND NFC: DFI bus arbitration subset */ #define NDCR (*(volatile u32 __iomem*)(NAND_VIRT + 0)) #define NDCR_ND_ARB_EN (1 << 12) #define NDCR_ND_ARB_CNTL (1 << 19) #define CKEN_BOOT 11 /* < Boot rom clock enable */ #define CKEN_TPM 19 /* < TPM clock enable */ #define CKEN_HSIO2 41 /* < HSIO2 clock enable */ #ifdef CONFIG_PM #define ISRAM_START 0x5c000000 #define ISRAM_SIZE SZ_256K static void __iomem *sram; static unsigned long wakeup_src; /* * Enter a standby mode (S0D1C2 or S0D2C2). Upon wakeup, the dynamic * memory controller has to be reinitialised, so we place some code * in the SRAM to perform this function. * * We disable FIQs across the standby - otherwise, we might receive a * FIQ while the SDRAM is unavailable. 
*/ static void pxa3xx_cpu_standby(unsigned int pwrmode) { void (*fn)(unsigned int) = (void __force *)(sram + 0x8000); memcpy_toio(sram + 0x8000, pm_enter_standby_start, pm_enter_standby_end - pm_enter_standby_start); AD2D0SR = ~0; AD2D1SR = ~0; AD2D0ER = wakeup_src; AD2D1ER = 0; ASCR = ASCR; ARSR = ARSR; local_fiq_disable(); fn(pwrmode); local_fiq_enable(); AD2D0ER = 0; AD2D1ER = 0; } /* * NOTE: currently, the OBM (OEM Boot Module) binary comes along with * PXA3xx development kits assumes that the resuming process continues * with the address stored within the first 4 bytes of SDRAM. The PSPR * register is used privately by BootROM and OBM, and _must_ be set to * 0x5c014000 for the moment. */ static void pxa3xx_cpu_pm_suspend(void) { volatile unsigned long *p = (volatile void *)0xc0000000; unsigned long saved_data = *p; #ifndef CONFIG_IWMMXT u64 acc0; #ifdef CONFIG_CC_IS_GCC asm volatile(".arch_extension xscale\n\t" "mra %Q0, %R0, acc0" : "=r" (acc0)); #else asm volatile("mrrc p0, 0, %Q0, %R0, c0" : "=r" (acc0)); #endif #endif /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); CKENB |= 1 << (CKEN_HSIO2 & 0x1f); /* clear and setup wakeup source */ AD3SR = ~0; AD3ER = wakeup_src; ASCR = ASCR; ARSR = ARSR; PCFR |= (1u << 13); /* L1_DIS */ PCFR &= ~((1u << 12) | (1u << 1)); /* L0_EN | SL_ROD */ PSPR = 0x5c014000; /* overwrite with the resume address */ *p = __pa_symbol(cpu_resume); cpu_suspend(0, pxa3xx_finish_suspend); *p = saved_data; AD3ER = 0; #ifndef CONFIG_IWMMXT #ifndef CONFIG_AS_IS_LLVM asm volatile(".arch_extension xscale\n\t" "mar acc0, %Q0, %R0" : "=r" (acc0)); #else asm volatile("mcrr p0, 0, %Q0, %R0, c0" :: "r" (acc0)); #endif #endif } static void pxa3xx_cpu_pm_enter(suspend_state_t state) { /* * Don't sleep if no wakeup sources are defined */ if (wakeup_src == 0) { printk(KERN_ERR "Not suspending: no wakeup sources\n"); return; } switch (state) { case PM_SUSPEND_STANDBY: 
pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2); break; case PM_SUSPEND_MEM: pxa3xx_cpu_pm_suspend(); break; } } static int pxa3xx_cpu_pm_valid(suspend_state_t state) { return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY; } static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = { .valid = pxa3xx_cpu_pm_valid, .enter = pxa3xx_cpu_pm_enter, }; static void __init pxa3xx_init_pm(void) { sram = ioremap(ISRAM_START, ISRAM_SIZE); if (!sram) { printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n"); return; } /* * Since we copy wakeup code into the SRAM, we need to ensure * that it is preserved over the low power modes. Note: bit 8 * is undocumented in the developer manual, but must be set. */ AD1R |= ADXR_L2 | ADXR_R0; AD2R |= ADXR_L2 | ADXR_R0; AD3R |= ADXR_L2 | ADXR_R0; /* * Clear the resume enable registers. */ AD1D0ER = 0; AD2D0ER = 0; AD2D1ER = 0; AD3ER = 0; pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns; } static int pxa3xx_set_wake(struct irq_data *d, unsigned int on) { unsigned long flags, mask = 0; switch (d->irq) { case IRQ_SSP3: mask = ADXER_MFP_WSSP3; break; case IRQ_MSL: mask = ADXER_WMSL0; break; case IRQ_USBH2: case IRQ_USBH1: mask = ADXER_WUSBH; break; case IRQ_KEYPAD: mask = ADXER_WKP; break; case IRQ_AC97: mask = ADXER_MFP_WAC97; break; case IRQ_USIM: mask = ADXER_WUSIM0; break; case IRQ_SSP2: mask = ADXER_MFP_WSSP2; break; case IRQ_I2C: mask = ADXER_MFP_WI2C; break; case IRQ_STUART: mask = ADXER_MFP_WUART3; break; case IRQ_BTUART: mask = ADXER_MFP_WUART2; break; case IRQ_FFUART: mask = ADXER_MFP_WUART1; break; case IRQ_MMC: mask = ADXER_MFP_WMMC1; break; case IRQ_SSP: mask = ADXER_MFP_WSSP1; break; case IRQ_RTCAlrm: mask = ADXER_WRTC; break; case IRQ_SSP4: mask = ADXER_MFP_WSSP4; break; case IRQ_TSI: mask = ADXER_WTSI; break; case IRQ_USIM2: mask = ADXER_WUSIM1; break; case IRQ_MMC2: mask = ADXER_MFP_WMMC2; break; case IRQ_NAND: mask = ADXER_MFP_WFLASH; break; case IRQ_USB2: mask = ADXER_WUSB2; break; case IRQ_WAKEUP0: mask = ADXER_WEXTWAKE0; break; case 
IRQ_WAKEUP1: mask = ADXER_WEXTWAKE1; break; case IRQ_MMC3: mask = ADXER_MFP_GEN12; break; default: return -EINVAL; } local_irq_save(flags); if (on) wakeup_src |= mask; else wakeup_src &= ~mask; local_irq_restore(flags); return 0; } #else static inline void pxa3xx_init_pm(void) {} #define pxa3xx_set_wake NULL #endif static void pxa_ack_ext_wakeup(struct irq_data *d) { PECR |= PECR_IS(d->irq - IRQ_WAKEUP0); } static void pxa_mask_ext_wakeup(struct irq_data *d) { pxa_mask_irq(d); PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0); } static void pxa_unmask_ext_wakeup(struct irq_data *d) { pxa_unmask_irq(d); PECR |= PECR_IE(d->irq - IRQ_WAKEUP0); } static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type) { if (flow_type & IRQ_TYPE_EDGE_RISING) PWER |= 1 << (d->irq - IRQ_WAKEUP0); if (flow_type & IRQ_TYPE_EDGE_FALLING) PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2); return 0; } static struct irq_chip pxa_ext_wakeup_chip = { .name = "WAKEUP", .irq_ack = pxa_ack_ext_wakeup, .irq_mask = pxa_mask_ext_wakeup, .irq_unmask = pxa_unmask_ext_wakeup, .irq_set_type = pxa_set_ext_wakeup_type, }; static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *, unsigned int)) { int irq; for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) { irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip, handle_edge_irq); irq_clear_status_flags(irq, IRQ_NOREQUEST); } pxa_ext_wakeup_chip.irq_set_wake = fn; } static void __init __pxa3xx_init_irq(void) { /* enable CP6 access */ u32 value; __asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value)); value |= (1 << 6); __asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value)); pxa_init_ext_wakeup_irq(pxa3xx_set_wake); } static int __init __init pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent) { __pxa3xx_init_irq(); pxa_dt_irq_init(pxa3xx_set_wake); set_handle_irq(ichp_handle_irq); return 0; } IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq); static struct map_desc pxa3xx_io_desc[] 
__initdata = { { /* Mem Ctl */ .virtual = (unsigned long)SMEMC_VIRT, .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE), .length = SMEMC_SIZE, .type = MT_DEVICE }, { .virtual = (unsigned long)NAND_VIRT, .pfn = __phys_to_pfn(NAND_PHYS), .length = NAND_SIZE, .type = MT_DEVICE }, }; void __init pxa3xx_map_io(void) { pxa_map_io(); iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc)); pxa3xx_get_clk_frequency_khz(1); } static int __init pxa3xx_init(void) { int ret = 0; if (cpu_is_pxa3xx()) { pxa_register_wdt(ARSR); /* * clear RDH bit every time after reset * * Note: the last 3 bits DxS are write-1-to-clear so carefully * preserve them here in case they will be referenced later */ ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); /* * Disable DFI bus arbitration, to prevent a system bus lock if * somebody disables the NAND clock (unused clock) while this * bit remains set. */ NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL; pxa3xx_init_pm(); enable_irq_wake(IRQ_WAKEUP0); if (cpu_is_pxa320()) enable_irq_wake(IRQ_WAKEUP1); register_syscore_ops(&pxa_irq_syscore_ops); register_syscore_ops(&pxa3xx_mfp_syscore_ops); } return ret; } postcore_initcall(pxa3xx_init);
linux-master
arch/arm/mach-pxa/pxa3xx.c
/* * am300epd.c -- Platform device for AM300 EPD kit * * Copyright (C) 2008, Jaya Kumar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * This work was made possible by help and equipment support from E-Ink * Corporation. http://support.eink.com/community * * This driver is written to be used with the Broadsheet display controller. * on the AM300 EPD prototype kit/development kit with an E-Ink 800x600 * Vizplex EPD on a Gumstix board using the Broadsheet interface board. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/gpio.h> #include "gumstix.h" #include "mfp-pxa25x.h" #include "irqs.h" #include <linux/platform_data/video-pxafb.h> #include "generic.h" #include <video/broadsheetfb.h> static unsigned int panel_type = 6; static struct platform_device *am300_device; static struct broadsheet_board am300_board; static unsigned long am300_pin_config[] __initdata = { GPIO16_GPIO, GPIO17_GPIO, GPIO32_GPIO, GPIO48_GPIO, GPIO49_GPIO, GPIO51_GPIO, GPIO74_GPIO, GPIO75_GPIO, GPIO76_GPIO, GPIO77_GPIO, /* this is the 16-bit hdb bus 58-73 */ GPIO58_GPIO, GPIO59_GPIO, GPIO60_GPIO, GPIO61_GPIO, GPIO62_GPIO, GPIO63_GPIO, GPIO64_GPIO, GPIO65_GPIO, GPIO66_GPIO, GPIO67_GPIO, GPIO68_GPIO, GPIO69_GPIO, GPIO70_GPIO, GPIO71_GPIO, GPIO72_GPIO, GPIO73_GPIO, }; /* register offsets for gpio control */ #define PWR_GPIO_PIN 16 #define CFG_GPIO_PIN 17 #define RDY_GPIO_PIN 32 #define DC_GPIO_PIN 48 #define RST_GPIO_PIN 49 #define LED_GPIO_PIN 51 #define RD_GPIO_PIN 74 #define WR_GPIO_PIN 75 #define CS_GPIO_PIN 76 #define IRQ_GPIO_PIN 77 /* hdb bus */ #define DB0_GPIO_PIN 58 #define DB15_GPIO_PIN 73 static int gpios[] = { PWR_GPIO_PIN, CFG_GPIO_PIN, 
RDY_GPIO_PIN, DC_GPIO_PIN, RST_GPIO_PIN, RD_GPIO_PIN, WR_GPIO_PIN, CS_GPIO_PIN, IRQ_GPIO_PIN, LED_GPIO_PIN }; static char *gpio_names[] = { "PWR", "CFG", "RDY", "DC", "RST", "RD", "WR", "CS", "IRQ", "LED" }; static int am300_wait_event(struct broadsheetfb_par *par) { /* todo: improve err recovery */ wait_event(par->waitq, gpio_get_value(RDY_GPIO_PIN)); return 0; } static int am300_init_gpio_regs(struct broadsheetfb_par *par) { int i; int err; char dbname[8]; for (i = 0; i < ARRAY_SIZE(gpios); i++) { err = gpio_request(gpios[i], gpio_names[i]); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %s, err=%d\n", gpio_names[i], err); goto err_req_gpio; } } /* we also need to take care of the hdb bus */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) { sprintf(dbname, "DB%d", i); err = gpio_request(i, dbname); if (err) { dev_err(&am300_device->dev, "failed requesting " "gpio %d, err=%d\n", i, err); goto err_req_gpio2; } } /* setup the outputs and init values */ gpio_direction_output(PWR_GPIO_PIN, 0); gpio_direction_output(CFG_GPIO_PIN, 1); gpio_direction_output(DC_GPIO_PIN, 0); gpio_direction_output(RD_GPIO_PIN, 1); gpio_direction_output(WR_GPIO_PIN, 1); gpio_direction_output(CS_GPIO_PIN, 1); gpio_direction_output(RST_GPIO_PIN, 0); /* setup the inputs */ gpio_direction_input(RDY_GPIO_PIN); gpio_direction_input(IRQ_GPIO_PIN); /* start the hdb bus as an input */ for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_direction_output(i, 0); /* go into command mode */ gpio_set_value(CFG_GPIO_PIN, 1); gpio_set_value(RST_GPIO_PIN, 0); msleep(10); gpio_set_value(RST_GPIO_PIN, 1); msleep(10); am300_wait_event(par); return 0; err_req_gpio2: while (--i >= DB0_GPIO_PIN) gpio_free(i); i = ARRAY_SIZE(gpios); err_req_gpio: while (--i >= 0) gpio_free(gpios[i]); return err; } static int am300_init_board(struct broadsheetfb_par *par) { return am300_init_gpio_regs(par); } static void am300_cleanup(struct broadsheetfb_par *par) { int i; free_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), 
par); for (i = 0; i < ARRAY_SIZE(gpios); i++) gpio_free(gpios[i]); for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) gpio_free(i); } static u16 am300_get_hdb(struct broadsheetfb_par *par) { u16 res = 0; int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) res |= (gpio_get_value(DB0_GPIO_PIN + i)) ? (1 << i) : 0; return res; } static void am300_set_hdb(struct broadsheetfb_par *par, u16 data) { int i; for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++) gpio_set_value(DB0_GPIO_PIN + i, (data >> i) & 0x01); } static void am300_set_ctl(struct broadsheetfb_par *par, unsigned char bit, u8 state) { switch (bit) { case BS_CS: gpio_set_value(CS_GPIO_PIN, state); break; case BS_DC: gpio_set_value(DC_GPIO_PIN, state); break; case BS_WR: gpio_set_value(WR_GPIO_PIN, state); break; } } static int am300_get_panel_type(void) { return panel_type; } static irqreturn_t am300_handle_irq(int irq, void *dev_id) { struct broadsheetfb_par *par = dev_id; wake_up(&par->waitq); return IRQ_HANDLED; } static int am300_setup_irq(struct fb_info *info) { int ret; struct broadsheetfb_par *par = info->par; ret = request_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), am300_handle_irq, IRQF_TRIGGER_RISING, "AM300", par); if (ret) dev_err(&am300_device->dev, "request_irq failed: %d\n", ret); return ret; } static struct broadsheet_board am300_board = { .owner = THIS_MODULE, .init = am300_init_board, .cleanup = am300_cleanup, .set_hdb = am300_set_hdb, .get_hdb = am300_get_hdb, .set_ctl = am300_set_ctl, .wait_for_rdy = am300_wait_event, .get_panel_type = am300_get_panel_type, .setup_irq = am300_setup_irq, }; int __init am300_init(void) { int ret; pxa2xx_mfp_config(ARRAY_AND_SIZE(am300_pin_config)); /* request our platform independent driver */ request_module("broadsheetfb"); am300_device = platform_device_alloc("broadsheetfb", -1); if (!am300_device) return -ENOMEM; /* the am300_board that will be seen by broadsheetfb is a copy */ platform_device_add_data(am300_device, &am300_board, sizeof(am300_board)); 
ret = platform_device_add(am300_device); if (ret) { platform_device_put(am300_device); return ret; } return 0; } module_param(panel_type, uint, 0); MODULE_PARM_DESC(panel_type, "Select the panel type: 37, 6, 97"); MODULE_DESCRIPTION("board driver for am300 epd kit"); MODULE_AUTHOR("Jaya Kumar"); MODULE_LICENSE("GPL");
linux-master
arch/arm/mach-pxa/am300epd.c
// SPDX-License-Identifier: GPL-2.0 /* * Static Memory Controller */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <linux/soc/pxa/cpu.h> #include "smemc.h" #include <linux/soc/pxa/smemc.h> #ifdef CONFIG_PM static unsigned long msc[2]; static unsigned long sxcnfg, memclkcfg; static unsigned long csadrcfg[4]; static int pxa3xx_smemc_suspend(void) { msc[0] = __raw_readl(MSC0); msc[1] = __raw_readl(MSC1); sxcnfg = __raw_readl(SXCNFG); memclkcfg = __raw_readl(MEMCLKCFG); csadrcfg[0] = __raw_readl(CSADRCFG0); csadrcfg[1] = __raw_readl(CSADRCFG1); csadrcfg[2] = __raw_readl(CSADRCFG2); csadrcfg[3] = __raw_readl(CSADRCFG3); return 0; } static void pxa3xx_smemc_resume(void) { __raw_writel(msc[0], MSC0); __raw_writel(msc[1], MSC1); __raw_writel(sxcnfg, SXCNFG); __raw_writel(memclkcfg, MEMCLKCFG); __raw_writel(csadrcfg[0], CSADRCFG0); __raw_writel(csadrcfg[1], CSADRCFG1); __raw_writel(csadrcfg[2], CSADRCFG2); __raw_writel(csadrcfg[3], CSADRCFG3); /* CSMSADRCFG wakes up in its default state (0), so we need to set it */ __raw_writel(0x2, CSMSADRCFG); } static struct syscore_ops smemc_syscore_ops = { .suspend = pxa3xx_smemc_suspend, .resume = pxa3xx_smemc_resume, }; static int __init smemc_init(void) { if (cpu_is_pxa3xx()) { /* * The only documentation we have on the * Chip Select Configuration Register (CSMSADRCFG) is that * it must be programmed to 0x2. * Moreover, in the bit definitions, the second bit * (CSMSADRCFG[1]) is called "SETALWAYS". * Other bits are reserved in this register. */ __raw_writel(0x2, CSMSADRCFG); register_syscore_ops(&smemc_syscore_ops); } return 0; } subsys_initcall(smemc_init); #endif static const unsigned int df_clkdiv[4] = { 1, 2, 4, 1 }; unsigned int pxa3xx_smemc_get_memclkdiv(void) { unsigned long memclkcfg = __raw_readl(MEMCLKCFG); return df_clkdiv[(memclkcfg >> 16) & 0x3]; }
linux-master
arch/arm/mach-pxa/smemc.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-pxa/pxa-dt.c * * Copyright (C) 2012 Daniel Mack */ #include <asm/mach/arch.h> #include "generic.h" #ifdef CONFIG_PXA25x static const char * const pxa25x_dt_board_compat[] __initconst = { "marvell,pxa250", NULL, }; DT_MACHINE_START(PXA25X_DT, "Marvell PXA25x (Device Tree Support)") .map_io = pxa25x_map_io, .restart = pxa_restart, .dt_compat = pxa25x_dt_board_compat, MACHINE_END #endif #ifdef CONFIG_PXA27x static const char * const pxa27x_dt_board_compat[] __initconst = { "marvell,pxa270", NULL, }; DT_MACHINE_START(PXA27X_DT, "Marvell PXA27x (Device Tree Support)") .map_io = pxa27x_map_io, .restart = pxa_restart, .dt_compat = pxa27x_dt_board_compat, MACHINE_END #endif #ifdef CONFIG_PXA3xx static const char *const pxa3xx_dt_board_compat[] __initconst = { "marvell,pxa300", "marvell,pxa310", "marvell,pxa320", NULL, }; DT_MACHINE_START(PXA_DT, "Marvell PXA3xx (Device Tree Support)") .map_io = pxa3xx_map_io, .restart = pxa_restart, .dt_compat = pxa3xx_dt_board_compat, MACHINE_END #endif
linux-master
arch/arm/mach-pxa/pxa-dt.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Low-level power-management support for Alpine platform. * * Copyright (C) 2015 Annapurna Labs Ltd. */ #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> #include "alpine_cpu_pm.h" #include "alpine_cpu_resume.h" /* NB registers */ #define AL_SYSFAB_POWER_CONTROL(cpu) (0x2000 + (cpu)*0x100 + 0x20) static struct regmap *al_sysfabric; static struct al_cpu_resume_regs __iomem *al_cpu_resume_regs; static int wakeup_supported; int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr) { if (!wakeup_supported) return -ENOSYS; /* * Set CPU resume address - * secure firmware running on boot will jump to this address * after setting proper CPU mode, and initialiing e.g. secure * regs (the same mode all CPUs are booted to - usually HYP) */ writel(phys_resume_addr, &al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr); /* Power-up the CPU */ regmap_write(al_sysfabric, AL_SYSFAB_POWER_CONTROL(phys_cpu), 0); return 0; } void __init alpine_cpu_pm_init(void) { struct device_node *np; uint32_t watermark; al_sysfabric = syscon_regmap_lookup_by_compatible("al,alpine-sysfabric-service"); np = of_find_compatible_node(NULL, NULL, "al,alpine-cpu-resume"); al_cpu_resume_regs = of_iomap(np, 0); wakeup_supported = !IS_ERR(al_sysfabric) && al_cpu_resume_regs; if (wakeup_supported) { watermark = readl(&al_cpu_resume_regs->watermark); wakeup_supported = (watermark & AL_CPU_RESUME_MAGIC_NUM_MASK) == AL_CPU_RESUME_MAGIC_NUM; } }
linux-master
arch/arm/mach-alpine/alpine_cpu_pm.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Machine declaration for Alpine platforms. * * Copyright (C) 2015 Annapurna Labs Ltd. */ #include <asm/mach/arch.h> static const char * const al_match[] __initconst = { "al,alpine", NULL, }; DT_MACHINE_START(AL_DT, "Annapurna Labs Alpine") .dt_compat = al_match, MACHINE_END
linux-master
arch/arm/mach-alpine/alpine_machine.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * SMP operations for Alpine platform. * * Copyright (C) 2015 Annapurna Labs Ltd. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/of.h> #include <asm/smp_plat.h> #include "alpine_cpu_pm.h" static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle) { phys_addr_t addr; addr = __pa_symbol(secondary_startup); if (addr > (phys_addr_t)(uint32_t)(-1)) { pr_err("FAIL: resume address over 32bit (%pa)", &addr); return -EINVAL; } return alpine_cpu_wakeup(cpu_logical_map(cpu), (uint32_t)addr); } static void __init alpine_smp_prepare_cpus(unsigned int max_cpus) { alpine_cpu_pm_init(); } static const struct smp_operations alpine_smp_ops __initconst = { .smp_prepare_cpus = alpine_smp_prepare_cpus, .smp_boot_secondary = alpine_boot_secondary, }; CPU_METHOD_OF_DECLARE(alpine_smp, "al,alpine-smp", &alpine_smp_ops);
linux-master
arch/arm/mach-alpine/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-dove/mpp.c
 *
 * MPP functions for Marvell Dove SoCs
 */

#include <linux/kernel.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <plat/mpp.h>
#include <plat/orion-gpio.h>
#include "dove.h"
#include "mpp.h"

struct dove_mpp_grp {
	int start;
	int end;
};

/* Map a group to a range of GPIO pins in that group */
static const struct dove_mpp_grp dove_mpp_grp[] = {
	[MPP_24_39] = {
		.start	= 24,
		.end	= 39,
	},
	[MPP_40_45] = {
		.start	= 40,
		.end	= 45,
	},
	[MPP_46_51] = {
		.start	= 46,
		.end	= 51,
	},
	[MPP_58_61] = {
		.start	= 58,
		.end	= 61,
	},
	[MPP_62_63] = {
		.start	= 62,
		.end	= 63,
	},
};

/* Enable gpio for a range of pins. mode should be a combination of
   GPIO_OUTPUT_OK | GPIO_INPUT_OK */
static void __init dove_mpp_gpio_mode(int start, int end, int gpio_mode)
{
	int i;

	for (i = start; i <= end; i++)
		orion_gpio_set_valid(i, gpio_mode);
}

/* Dump all the extra MPP registers. The platform code will dump the
   registers for pins 0-23. */
static void __init dove_mpp_dump_regs(void)
{
	pr_debug("PMU_CTRL4_CTRL: %08x\n",
		 readl(DOVE_MPP_CTRL4_VIRT_BASE));

	pr_debug("PMU_MPP_GENERAL_CTRL: %08x\n",
		 readl(DOVE_PMU_MPP_GENERAL_CTRL));

	pr_debug("MPP_GENERAL: %08x\n", readl(DOVE_MPP_GENERAL_VIRT_BASE));
}

/*
 * Select NAND-flash-controller vs GPIO function for pins 64-71 (bit 0 of
 * the MPP general register), then mark those pins GPIO-output capable.
 */
static void __init dove_mpp_cfg_nfc(int sel)
{
	u32 mpp_gen_cfg = readl(DOVE_MPP_GENERAL_VIRT_BASE);

	mpp_gen_cfg &= ~0x1;
	mpp_gen_cfg |= sel;
	writel(mpp_gen_cfg, DOVE_MPP_GENERAL_VIRT_BASE);

	dove_mpp_gpio_mode(64, 71, GPIO_OUTPUT_OK);
}

/*
 * Configure the audio-1 pin group (pins 52-57). @sel is a bitmask:
 * bit 0 routes TWSI option 3 onto pins 56-57, bit 1 routes SPDIFO onto
 * pin 57, bit 2 hands pins 52-55 to the SSP, bit 3 selects plain GPIO
 * (per the register bits toggled below).
 */
static void __init dove_mpp_cfg_au1(int sel)
{
	u32 mpp_ctrl4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
	u32 ssp_ctrl1 = readl(DOVE_SSP_CTRL_STATUS_1);
	u32 mpp_gen_ctrl = readl(DOVE_MPP_GENERAL_VIRT_BASE);
	u32 global_cfg_2 = readl(DOVE_GLOBAL_CONFIG_2);

	/* Start from a clean slate: clear every AU1-related select bit. */
	mpp_ctrl4 &= ~(DOVE_AU1_GPIO_SEL);
	ssp_ctrl1 &= ~(DOVE_SSP_ON_AU1);
	mpp_gen_ctrl &= ~(DOVE_AU1_SPDIFO_GPIO_EN);
	global_cfg_2 &= ~(DOVE_TWSI_OPTION3_GPIO);

	if (!sel || sel == 0x2)
		dove_mpp_gpio_mode(52, 57, 0);
	else
		dove_mpp_gpio_mode(52, 57, GPIO_OUTPUT_OK | GPIO_INPUT_OK);

	if (sel & 0x1) {
		global_cfg_2 |= DOVE_TWSI_OPTION3_GPIO;
		dove_mpp_gpio_mode(56, 57, 0);
	}

	if (sel & 0x2) {
		mpp_gen_ctrl |= DOVE_AU1_SPDIFO_GPIO_EN;
		dove_mpp_gpio_mode(57, 57, GPIO_OUTPUT_OK | GPIO_INPUT_OK);
	}

	if (sel & 0x4) {
		ssp_ctrl1 |= DOVE_SSP_ON_AU1;
		dove_mpp_gpio_mode(52, 55, 0);
	}

	if (sel & 0x8)
		mpp_ctrl4 |= DOVE_AU1_GPIO_SEL;

	writel(mpp_ctrl4, DOVE_MPP_CTRL4_VIRT_BASE);
	writel(ssp_ctrl1, DOVE_SSP_CTRL_STATUS_1);
	writel(mpp_gen_ctrl, DOVE_MPP_GENERAL_VIRT_BASE);
	writel(global_cfg_2, DOVE_GLOBAL_CONFIG_2);
}

/* Configure the group registers, enabling GPIO if sel indicates the
   pin is to be used for GPIO */
static void __init dove_mpp_conf_grp(unsigned int *mpp_grp_list)
{
	u32 mpp_ctrl4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
	int gpio_mode;

	for ( ; *mpp_grp_list; mpp_grp_list++) {
		unsigned int num = MPP_NUM(*mpp_grp_list);
		unsigned int sel = MPP_SEL(*mpp_grp_list);

		if (num > MPP_GRP_MAX) {
			pr_err("dove: invalid MPP GRP number (%u)\n", num);
			continue;
		}

		/* One select bit per group in CTRL4. */
		mpp_ctrl4 &= ~(0x1 << num);
		mpp_ctrl4 |= sel << num;

		/* Non-zero sel means the whole group becomes GPIO. */
		gpio_mode = sel ? GPIO_OUTPUT_OK | GPIO_INPUT_OK : 0;
		dove_mpp_gpio_mode(dove_mpp_grp[num].start,
				   dove_mpp_grp[num].end, gpio_mode);
	}
	writel(mpp_ctrl4, DOVE_MPP_CTRL4_VIRT_BASE);
}

/* Configure the various MPP pins on Dove */
void __init dove_mpp_conf(unsigned int *mpp_list,
			  unsigned int *mpp_grp_list,
			  unsigned int grp_au1_52_57,
			  unsigned int grp_nfc_64_71)
{
	dove_mpp_dump_regs();

	/* Use platform code for pins 0-23 */
	orion_mpp_conf(mpp_list, 0, MPP_MAX, DOVE_MPP_VIRT_BASE);

	dove_mpp_conf_grp(mpp_grp_list);
	dove_mpp_cfg_au1(grp_au1_52_57);
	dove_mpp_cfg_nfc(grp_nfc_64_71);

	dove_mpp_dump_regs();
}
linux-master
arch/arm/mach-dove/mpp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-dove/common.c
 *
 * Core functions for Marvell Dove 88AP510 System On Chip
 */

#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_data/dma-mv_xor.h>
#include <linux/platform_data/usb-ehci-orion.h>
#include <linux/platform_device.h>
#include <linux/soc/dove/pmu.h>
#include <asm/hardware/cache-tauros2.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <plat/common.h>
#include <plat/irq.h>
#include <plat/time.h>
#include "bridge-regs.h"
#include "pm.h"
#include "common.h"

/* These can go away once Dove uses the mvebu-mbus DT binding */
#define DOVE_MBUS_PCIE0_MEM_TARGET	0x4
#define DOVE_MBUS_PCIE0_MEM_ATTR	0xe8
#define DOVE_MBUS_PCIE0_IO_TARGET	0x4
#define DOVE_MBUS_PCIE0_IO_ATTR		0xe0
#define DOVE_MBUS_PCIE1_MEM_TARGET	0x8
#define DOVE_MBUS_PCIE1_MEM_ATTR	0xe8
#define DOVE_MBUS_PCIE1_IO_TARGET	0x8
#define DOVE_MBUS_PCIE1_IO_ATTR		0xe0
#define DOVE_MBUS_CESA_TARGET		0x3
#define DOVE_MBUS_CESA_ATTR		0x1
#define DOVE_MBUS_BOOTROM_TARGET	0x1
#define DOVE_MBUS_BOOTROM_ATTR		0xfd
#define DOVE_MBUS_SCRATCHPAD_TARGET	0xd
#define DOVE_MBUS_SCRATCHPAD_ATTR	0x0

/*****************************************************************************
 * I/O Address Mapping
 ****************************************************************************/
static struct map_desc __maybe_unused dove_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long) DOVE_SB_REGS_VIRT_BASE,
		.pfn		= __phys_to_pfn(DOVE_SB_REGS_PHYS_BASE),
		.length		= DOVE_SB_REGS_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long) DOVE_NB_REGS_VIRT_BASE,
		.pfn		= __phys_to_pfn(DOVE_NB_REGS_PHYS_BASE),
		.length		= DOVE_NB_REGS_SIZE,
		.type		= MT_DEVICE,
	},
};

/* Create the static south/north-bridge register mappings. */
void __init dove_map_io(void)
{
	iotable_init(dove_io_desc, ARRAY_SIZE(dove_io_desc));
}

/*****************************************************************************
 * CLK tree
 ****************************************************************************/
static int dove_tclk;

static DEFINE_SPINLOCK(gating_lock);
static struct clk *tclk;

/* Register one gated child of tclk controlled by CLOCK_GATING_CONTROL. */
static struct clk __init *dove_register_gate(const char *name,
					     const char *parent, u8 bit_idx)
{
	return clk_register_gate(NULL, name, parent, 0,
				 (void __iomem *)CLOCK_GATING_CONTROL,
				 bit_idx, 0, &gating_lock);
}

/*
 * Build the fixed-rate tclk, one gate per peripheral, and register the
 * clkdev lookups that tie each gate to its platform device name.
 */
static void __init dove_clk_init(void)
{
	struct clk *usb0, *usb1, *sata, *pex0, *pex1, *sdio0, *sdio1;
	struct clk *nand, *camera, *i2s0, *i2s1, *crypto, *ac97, *pdma;
	struct clk *xor0, *xor1, *ge, *gephy;

	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, dove_tclk);

	usb0 = dove_register_gate("usb0", "tclk", CLOCK_GATING_BIT_USB0);
	usb1 = dove_register_gate("usb1", "tclk", CLOCK_GATING_BIT_USB1);
	sata = dove_register_gate("sata", "tclk", CLOCK_GATING_BIT_SATA);
	pex0 = dove_register_gate("pex0", "tclk", CLOCK_GATING_BIT_PCIE0);
	pex1 = dove_register_gate("pex1", "tclk", CLOCK_GATING_BIT_PCIE1);
	sdio0 = dove_register_gate("sdio0", "tclk", CLOCK_GATING_BIT_SDIO0);
	sdio1 = dove_register_gate("sdio1", "tclk", CLOCK_GATING_BIT_SDIO1);
	nand = dove_register_gate("nand", "tclk", CLOCK_GATING_BIT_NAND);
	camera = dove_register_gate("camera", "tclk", CLOCK_GATING_BIT_CAMERA);
	i2s0 = dove_register_gate("i2s0", "tclk", CLOCK_GATING_BIT_I2S0);
	i2s1 = dove_register_gate("i2s1", "tclk", CLOCK_GATING_BIT_I2S1);
	crypto = dove_register_gate("crypto", "tclk", CLOCK_GATING_BIT_CRYPTO);
	ac97 = dove_register_gate("ac97", "tclk", CLOCK_GATING_BIT_AC97);
	pdma = dove_register_gate("pdma", "tclk", CLOCK_GATING_BIT_PDMA);
	xor0 = dove_register_gate("xor0", "tclk", CLOCK_GATING_BIT_XOR0);
	xor1 = dove_register_gate("xor1", "tclk", CLOCK_GATING_BIT_XOR1);
	/* The GbE MAC gate is parented on the GbE PHY gate, not tclk. */
	gephy = dove_register_gate("gephy", "tclk",
				   CLOCK_GATING_BIT_GIGA_PHY);
	ge = dove_register_gate("ge", "gephy", CLOCK_GATING_BIT_GBE);

	orion_clkdev_add(NULL, "orion_spi.0", tclk);
	orion_clkdev_add(NULL, "orion_spi.1", tclk);
	orion_clkdev_add(NULL, "orion_wdt", tclk);
	orion_clkdev_add(NULL, "mv64xxx_i2c.0", tclk);

	orion_clkdev_add(NULL, "orion-ehci.0", usb0);
	orion_clkdev_add(NULL, "orion-ehci.1", usb1);
	orion_clkdev_add(NULL, "mv643xx_eth_port.0", ge);
	orion_clkdev_add(NULL, "sata_mv.0", sata);
	orion_clkdev_add("0", "pcie", pex0);
	orion_clkdev_add("1", "pcie", pex1);
	orion_clkdev_add(NULL, "sdhci-dove.0", sdio0);
	orion_clkdev_add(NULL, "sdhci-dove.1", sdio1);
	orion_clkdev_add(NULL, "orion_nand", nand);
	orion_clkdev_add(NULL, "cafe1000-ccic.0", camera);
	orion_clkdev_add(NULL, "mvebu-audio.0", i2s0);
	orion_clkdev_add(NULL, "mvebu-audio.1", i2s1);
	orion_clkdev_add(NULL, "mv_crypto", crypto);
	orion_clkdev_add(NULL, "dove-ac97", ac97);
	orion_clkdev_add(NULL, "dove-pdma", pdma);
	orion_clkdev_add(NULL, MV_XOR_NAME ".0", xor0);
	orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
}

/*****************************************************************************
 * EHCI0
 ****************************************************************************/
void __init dove_ehci0_init(void)
{
	orion_ehci_init(DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0, EHCI_PHY_NA);
}

/*****************************************************************************
 * EHCI1
 ****************************************************************************/
void __init dove_ehci1_init(void)
{
	orion_ehci_1_init(DOVE_USB1_PHYS_BASE, IRQ_DOVE_USB1);
}

/*****************************************************************************
 * GE00
 ****************************************************************************/
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
	orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
			IRQ_DOVE_GE00_ERR, 1600);
}

/*****************************************************************************
 * SoC RTC
 ****************************************************************************/
static void __init dove_rtc_init(void)
{
	orion_rtc_init(DOVE_RTC_PHYS_BASE, IRQ_DOVE_RTC);
}

/*****************************************************************************
 * SATA
 ****************************************************************************/
void __init dove_sata_init(struct mv_sata_platform_data *sata_data)
{
	orion_sata_init(sata_data, DOVE_SATA_PHYS_BASE, IRQ_DOVE_SATA);
}

/*****************************************************************************
 * UART0
 ****************************************************************************/
void __init dove_uart0_init(void)
{
	orion_uart0_init(DOVE_UART0_VIRT_BASE, DOVE_UART0_PHYS_BASE,
			 IRQ_DOVE_UART_0, tclk);
}

/*****************************************************************************
 * UART1
 ****************************************************************************/
void __init dove_uart1_init(void)
{
	orion_uart1_init(DOVE_UART1_VIRT_BASE, DOVE_UART1_PHYS_BASE,
			 IRQ_DOVE_UART_1, tclk);
}

/*****************************************************************************
 * UART2
 ****************************************************************************/
void __init dove_uart2_init(void)
{
	orion_uart2_init(DOVE_UART2_VIRT_BASE, DOVE_UART2_PHYS_BASE,
			 IRQ_DOVE_UART_2, tclk);
}

/*****************************************************************************
 * UART3
 ****************************************************************************/
void __init dove_uart3_init(void)
{
	orion_uart3_init(DOVE_UART3_VIRT_BASE, DOVE_UART3_PHYS_BASE,
			 IRQ_DOVE_UART_3, tclk);
}

/*****************************************************************************
 * SPI
 ****************************************************************************/
void __init dove_spi0_init(void)
{
	orion_spi_init(DOVE_SPI0_PHYS_BASE);
}

void __init dove_spi1_init(void)
{
	orion_spi_1_init(DOVE_SPI1_PHYS_BASE);
}

/*****************************************************************************
 * I2C
 ****************************************************************************/
void __init dove_i2c_init(void)
{
	orion_i2c_init(DOVE_I2C_PHYS_BASE, IRQ_DOVE_I2C, 10);
}

/*****************************************************************************
 * Time handling
 ****************************************************************************/
/* Early setup: timer base and the mbus driver (runs before timers/irqs). */
void __init dove_init_early(void)
{
	orion_time_set_base(TIMER_VIRT_BASE);
	mvebu_mbus_init("marvell,dove-mbus",
			BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
			DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ);
}

/* TCLK is fixed at 166.67 MHz on Dove. */
static int __init dove_find_tclk(void)
{
	return 166666667;
}

void __init dove_timer_init(void)
{
	dove_tclk = dove_find_tclk();
	orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
			IRQ_DOVE_BRIDGE, dove_tclk);
}

/*****************************************************************************
 * XOR 0
 ****************************************************************************/
static void __init dove_xor0_init(void)
{
	orion_xor0_init(DOVE_XOR0_PHYS_BASE, DOVE_XOR0_HIGH_PHYS_BASE,
			IRQ_DOVE_XOR_00, IRQ_DOVE_XOR_01);
}

/*****************************************************************************
 * XOR 1
 ****************************************************************************/
static void __init dove_xor1_init(void)
{
	orion_xor1_init(DOVE_XOR1_PHYS_BASE, DOVE_XOR1_HIGH_PHYS_BASE,
			IRQ_DOVE_XOR_10, IRQ_DOVE_XOR_11);
}

/*****************************************************************************
 * SDIO
 ****************************************************************************/
static u64 sdio_dmamask = DMA_BIT_MASK(32);

static struct resource dove_sdio0_resources[] = {
	{
		.start	= DOVE_SDIO0_PHYS_BASE,
		.end	= DOVE_SDIO0_PHYS_BASE + 0xff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_DOVE_SDIO0,
		.end	= IRQ_DOVE_SDIO0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dove_sdio0 = {
	.name		= "sdhci-dove",
	.id		= 0,
	.dev		= {
		.dma_mask		= &sdio_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= dove_sdio0_resources,
	.num_resources	= ARRAY_SIZE(dove_sdio0_resources),
};

void __init dove_sdio0_init(void)
{
	platform_device_register(&dove_sdio0);
}

static struct resource dove_sdio1_resources[] = {
	{
		.start	= DOVE_SDIO1_PHYS_BASE,
		.end	= DOVE_SDIO1_PHYS_BASE + 0xff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_DOVE_SDIO1,
		.end	= IRQ_DOVE_SDIO1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dove_sdio1 = {
	.name		= "sdhci-dove",
	.id		= 1,
	.dev		= {
		.dma_mask		= &sdio_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= dove_sdio1_resources,
	.num_resources	= ARRAY_SIZE(dove_sdio1_resources),
};

void __init dove_sdio1_init(void)
{
	platform_device_register(&dove_sdio1);
}

/* Program the static mbus address windows (PCIe, CESA, boot ROM, pad). */
void __init dove_setup_cpu_wins(void)
{
	/*
	 * The PCIe windows will no longer be statically allocated
	 * here once Dove is migrated to the pci-mvebu driver. The
	 * non-PCIe windows will no longer be created here once Dove
	 * fully moves to DT.
	 */
	mvebu_mbus_add_window_remap_by_id(DOVE_MBUS_PCIE0_IO_TARGET,
					  DOVE_MBUS_PCIE0_IO_ATTR,
					  DOVE_PCIE0_IO_PHYS_BASE,
					  DOVE_PCIE0_IO_SIZE,
					  DOVE_PCIE0_IO_BUS_BASE);
	mvebu_mbus_add_window_remap_by_id(DOVE_MBUS_PCIE1_IO_TARGET,
					  DOVE_MBUS_PCIE1_IO_ATTR,
					  DOVE_PCIE1_IO_PHYS_BASE,
					  DOVE_PCIE1_IO_SIZE,
					  DOVE_PCIE1_IO_BUS_BASE);
	mvebu_mbus_add_window_by_id(DOVE_MBUS_PCIE0_MEM_TARGET,
				    DOVE_MBUS_PCIE0_MEM_ATTR,
				    DOVE_PCIE0_MEM_PHYS_BASE,
				    DOVE_PCIE0_MEM_SIZE);
	mvebu_mbus_add_window_by_id(DOVE_MBUS_PCIE1_MEM_TARGET,
				    DOVE_MBUS_PCIE1_MEM_ATTR,
				    DOVE_PCIE1_MEM_PHYS_BASE,
				    DOVE_PCIE1_MEM_SIZE);
	mvebu_mbus_add_window_by_id(DOVE_MBUS_CESA_TARGET,
				    DOVE_MBUS_CESA_ATTR,
				    DOVE_CESA_PHYS_BASE,
				    DOVE_CESA_SIZE);
	mvebu_mbus_add_window_by_id(DOVE_MBUS_BOOTROM_TARGET,
				    DOVE_MBUS_BOOTROM_ATTR,
				    DOVE_BOOTROM_PHYS_BASE,
				    DOVE_BOOTROM_SIZE);
	mvebu_mbus_add_window_by_id(DOVE_MBUS_SCRATCHPAD_TARGET,
				    DOVE_MBUS_SCRATCHPAD_ATTR,
				    DOVE_SCRATCHPAD_PHYS_BASE,
				    DOVE_SCRATCHPAD_SIZE);
}

static struct resource orion_wdt_resource[] = {
	DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x04),
	DEFINE_RES_MEM(RSTOUTn_MASK_PHYS, 0x04),
};

static struct platform_device orion_wdt_device = {
	.name		= "orion_wdt",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(orion_wdt_resource),
	.resource	= orion_wdt_resource,
};

static void __init __maybe_unused orion_wdt_init(void)
{
	platform_device_register(&orion_wdt_device);
}

/* Power domains managed by the Dove PMU (video and GPU islands). */
static const struct dove_pmu_domain_initdata pmu_domains[] __initconst = {
	{
		.pwr_mask = PMU_PWR_VPU_PWR_DWN_MASK,
		.rst_mask = PMU_SW_RST_VIDEO_MASK,
		.iso_mask = PMU_ISO_VIDEO_MASK,
		.name = "vpu-domain",
	}, {
		.pwr_mask = PMU_PWR_GPU_PWR_DWN_MASK,
		.rst_mask = PMU_SW_RST_GPU_MASK,
		.iso_mask = PMU_ISO_GPU_MASK,
		.name = "gpu-domain",
	}, {
		/* sentinel */
	},
};

static const struct dove_pmu_initdata pmu_data __initconst = {
	.pmc_base	= DOVE_PMU_VIRT_BASE,
	.pmu_base	= DOVE_PMU_VIRT_BASE + 0x8000,
	.irq		= IRQ_DOVE_PMU,
	.irq_domain_start = IRQ_DOVE_PMU_START,
	.domains	= pmu_domains,
};

/* Core SoC bring-up shared by every Dove board file. */
void __init dove_init(void)
{
	pr_info("Dove 88AP510 SoC, TCLK = %d MHz.\n",
		(dove_tclk + 499999) / 1000000);

#ifdef CONFIG_CACHE_TAUROS2
	tauros2_init(0);
#endif
	dove_setup_cpu_wins();

	/* Setup root of clk tree */
	dove_clk_init();

	/* internal devices that every board has */
	dove_init_pmu_legacy(&pmu_data);
	dove_rtc_init();
	dove_xor0_init();
	dove_xor1_init();
}

/* Reboot by asserting RSTOUTn via the soft-reset register; never returns. */
void dove_restart(enum reboot_mode mode, const char *cmd)
{
	/*
	 * Enable soft reset to assert RSTOUTn.
	 */
	writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);

	/*
	 * Assert soft reset.
	 */
	writel(SOFT_RESET, SYSTEM_SOFT_RESET);

	while (1)
		;
}
linux-master
arch/arm/mach-dove/common.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-dove/irq.c * * Dove IRQ handling. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/exception.h> #include <plat/irq.h> #include <plat/orion-gpio.h> #include "pm.h" #include "bridge-regs.h" #include "common.h" static int __initdata gpio0_irqs[4] = { IRQ_DOVE_GPIO_0_7, IRQ_DOVE_GPIO_8_15, IRQ_DOVE_GPIO_16_23, IRQ_DOVE_GPIO_24_31, }; static int __initdata gpio1_irqs[4] = { IRQ_DOVE_HIGH_GPIO, 0, 0, 0, }; static int __initdata gpio2_irqs[4] = { 0, 0, 0, 0, }; static void __iomem *dove_irq_base = IRQ_VIRT_BASE; static asmlinkage void __exception_irq_entry dove_legacy_handle_irq(struct pt_regs *regs) { u32 stat; stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_LOW_OFF); stat &= readl_relaxed(dove_irq_base + IRQ_MASK_LOW_OFF); if (stat) { unsigned int hwirq = 1 + __fls(stat); handle_IRQ(hwirq, regs); return; } stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_HIGH_OFF); stat &= readl_relaxed(dove_irq_base + IRQ_MASK_HIGH_OFF); if (stat) { unsigned int hwirq = 33 + __fls(stat); handle_IRQ(hwirq, regs); return; } } void __init dove_init_irq(void) { orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF); orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF); set_handle_irq(dove_legacy_handle_irq); /* * Initialize gpiolib for GPIOs 0-71. */ orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0, IRQ_DOVE_GPIO_START, gpio0_irqs); orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0, IRQ_DOVE_GPIO_START + 32, gpio1_irqs); orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0, IRQ_DOVE_GPIO_START + 64, gpio2_irqs); }
linux-master
arch/arm/mach-dove/irq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-dove/pcie.c
 *
 * PCIe functions for Marvell Dove 88AP510 SoC
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/clk.h>
#include <video/vga.h>
#include <asm/mach/pci.h>
#include <asm/mach/arch.h>
#include <asm/setup.h>
#include <asm/delay.h>
#include <plat/pcie.h>
#include <plat/addr-map.h>
#include "irqs.h"
#include "bridge-regs.h"
#include "common.h"

/* Per-root-port state; at most two ports on Dove. */
struct pcie_port {
	u8			index;
	u8			root_bus_nr;
	void __iomem		*base;
	spinlock_t		conf_lock;	/* serializes config accesses */
	char			mem_space_name[16];
	struct resource		res;
};

static struct pcie_port pcie_port[2];
static int num_pcie_ports;

/* hw_pci.setup: program the port, map its I/O space, claim its MEM window. */
static int __init dove_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct pcie_port *pp;
	struct resource realio;

	if (nr >= num_pcie_ports)
		return 0;

	pp = &pcie_port[nr];
	sys->private_data = pp;
	pp->root_bus_nr = sys->busnr;

	/*
	 * Generic PCIe unit setup.
	 */
	orion_pcie_set_local_bus_nr(pp->base, sys->busnr);

	orion_pcie_setup(pp->base);

	realio.start = sys->busnr * SZ_64K;
	realio.end = realio.start + SZ_64K - 1;
	pci_remap_iospace(&realio, pp->index == 0 ? DOVE_PCIE0_IO_PHYS_BASE :
						    DOVE_PCIE1_IO_PHYS_BASE);

	/*
	 * IORESOURCE_MEM
	 */
	snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
		 "PCIe %d MEM", pp->index);
	pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
	pp->res.name = pp->mem_space_name;
	if (pp->index == 0) {
		pp->res.start = DOVE_PCIE0_MEM_PHYS_BASE;
		pp->res.end = pp->res.start + DOVE_PCIE0_MEM_SIZE - 1;
	} else {
		pp->res.start = DOVE_PCIE1_MEM_PHYS_BASE;
		pp->res.end = pp->res.start + DOVE_PCIE1_MEM_SIZE - 1;
	}
	pp->res.flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, &pp->res))
		panic("Request PCIe Memory resource failed\n");
	pci_add_resource_offset(&sys->resources, &pp->res, sys->mem_offset);

	return 1;
}

static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
{
	/*
	 * Don't go out when trying to access nonexisting devices
	 * on the local bus.
	 */
	if (bus == pp->root_bus_nr && dev > 1)
		return 0;

	return 1;
}

/* Config-space read; returns all-ones for filtered-out devices. */
static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pci_sys_data *sys = bus->sysdata;
	struct pcie_port *pp = sys->private_data;
	unsigned long flags;
	int ret;

	if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	spin_lock_irqsave(&pp->conf_lock, flags);
	ret = orion_pcie_rd_conf(pp->base, bus, devfn, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

/* Config-space write, serialized against reads via conf_lock. */
static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pci_sys_data *sys = bus->sysdata;
	struct pcie_port *pp = sys->private_data;
	unsigned long flags;
	int ret;

	if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	spin_lock_irqsave(&pp->conf_lock, flags);
	ret = orion_pcie_wr_conf(pp->base, bus, devfn, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

static struct pci_ops pcie_ops = {
	.read = pcie_rd_conf,
	.write = pcie_wr_conf,
};

/*
 * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
 * is operating as a root complex this needs to be switched to
 * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
 * the device. Decoding setup is handled by the orion code.
 */
static void rc_pci_fixup(struct pci_dev *dev)
{
	if (dev->bus->parent == NULL && dev->devfn == 0) {
		struct resource *r;

		dev->class &= 0xff;
		dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
		pci_dev_for_each_resource(dev, r) {
			r->start	= 0;
			r->end		= 0;
			r->flags	= 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);

/* hw_pci.scan: hand our resources and ops to the generic bridge scan. */
static int __init
dove_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
{
	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);

	if (nr >= num_pcie_ports) {
		BUG();
		return -EINVAL;
	}

	list_splice_init(&sys->resources, &bridge->windows);
	bridge->dev.parent = NULL;
	bridge->sysdata = sys;
	bridge->busnr = sys->busnr;
	bridge->ops = &pcie_ops;

	return pci_scan_root_bus_bridge(bridge);
}

/* All devices behind a port share that port's single legacy interrupt. */
static int __init dove_pcie_map_irq(const struct pci_dev *dev, u8 slot,
	u8 pin)
{
	struct pci_sys_data *sys = dev->sysdata;
	struct pcie_port *pp = sys->private_data;

	return pp->index ? IRQ_DOVE_PCIE1 : IRQ_DOVE_PCIE0;
}

static struct hw_pci dove_pci __initdata = {
	.nr_controllers	= 2,
	.setup		= dove_pcie_setup,
	.scan		= dove_pcie_scan_bus,
	.map_irq	= dove_pcie_map_irq,
};

/*
 * Register a port only when its link is up; also ungate the port clock.
 * Ports with a down link are skipped entirely.
 */
static void __init add_pcie_port(int index, void __iomem *base)
{
	printk(KERN_INFO "Dove PCIe port %d: ", index);

	if (orion_pcie_link_up(base)) {
		struct pcie_port *pp = &pcie_port[num_pcie_ports++];
		struct clk *clk = clk_get_sys("pcie", (index ? "1" : "0"));

		if (!IS_ERR(clk))
			clk_prepare_enable(clk);

		printk(KERN_INFO "link up\n");

		pp->index = index;
		pp->root_bus_nr = -1;
		pp->base = base;
		spin_lock_init(&pp->conf_lock);
		memset(&pp->res, 0, sizeof(pp->res));
	} else {
		printk(KERN_INFO "link down, ignoring\n");
	}
}

/* Entry point called from board code; probes the requested ports. */
void __init dove_pcie_init(int init_port0, int init_port1)
{
	vga_base = DOVE_PCIE0_MEM_PHYS_BASE;

	if (init_port0)
		add_pcie_port(0, DOVE_PCIE0_VIRT_BASE);

	if (init_port1)
		add_pcie_port(1, DOVE_PCIE1_VIRT_BASE);

	pci_common_init(&dove_pci);
}
linux-master
arch/arm/mach-dove/pcie.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-dove/cm-a510.c * * Copyright (C) 2010 CompuLab, Ltd. * Konstantin Sinyuk <[email protected]> * * Based on Marvell DB-MV88AP510-BP Development Board Setup */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "dove.h" #include "common.h" static struct mv643xx_eth_platform_data cm_a510_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR_DEFAULT, }; static struct mv_sata_platform_data cm_a510_sata_data = { .n_ports = 1, }; /* * SPI Devices: * SPI0: 1M Flash Winbond w25q32bv */ static const struct flash_platform_data cm_a510_spi_flash_data = { .type = "w25q32bv", }; static struct spi_board_info __initdata cm_a510_spi_flash_info[] = { { .modalias = "m25p80", .platform_data = &cm_a510_spi_flash_data, .irq = -1, .max_speed_hz = 20000000, .bus_num = 0, .chip_select = 0, }, }; static int __init cm_a510_pci_init(void) { if (machine_is_cm_a510()) dove_pcie_init(1, 1); return 0; } subsys_initcall(cm_a510_pci_init); /* Board Init */ static void __init cm_a510_init(void) { /* * Basic Dove setup. Needs to be called early. */ dove_init(); dove_ge00_init(&cm_a510_ge00_data); dove_ehci0_init(); dove_ehci1_init(); dove_sata_init(&cm_a510_sata_data); dove_sdio0_init(); dove_sdio1_init(); dove_spi0_init(); dove_spi1_init(); dove_uart0_init(); dove_uart1_init(); dove_i2c_init(); spi_register_board_info(cm_a510_spi_flash_info, ARRAY_SIZE(cm_a510_spi_flash_info)); } MACHINE_START(CM_A510, "Compulab CM-A510 Board") .atag_offset = 0x100, .nr_irqs = DOVE_NR_IRQS, .init_machine = cm_a510_init, .map_io = dove_map_io, .init_early = dove_init_early, .init_irq = dove_init_irq, .init_time = dove_timer_init, .restart = dove_restart, MACHINE_END
linux-master
arch/arm/mach-dove/cm-a510.c
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <[email protected]>
 * Copyright (C) 2010 Rabin Vincent <[email protected]>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with NOP till they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/patch.h>

/*
 * The compiler emitted profiling hook consists of
 *
 *	PUSH	{LR}
 *	BL	__gnu_mcount_nc
 *
 * To turn this combined sequence into a NOP, we need to restore the value of
 * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not
 * modified anyway, and reloading LR from memory is highly likely to be less
 * efficient.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf10d0d04	/* add.w sp, sp, #4 */
#else
#define	NOP		0xe28dd004	/* add   sp, sp, #4 */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* stop_machine() callback: apply the queued ftrace command on one CPU. */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

/* Serialize all code patching through stop_machine(). */
void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/* The instruction a disabled call-site is patched to (see NOP above). */
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

void ftrace_caller_from_init(void);
void ftrace_regs_caller_from_init(void);

/*
 * Call-sites in .init.text may be out of branch range of the regular
 * trampolines; redirect them to the *_from_init variants while init
 * memory is still mapped.
 */
static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
					  unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
	    system_state >= SYSTEM_FREEING_INITMEM ||
	    likely(!is_kernel_inittext(rec->ip)))
		return addr;
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
	    addr == (unsigned long)&ftrace_caller)
		return (unsigned long)&ftrace_caller_from_init;
	return (unsigned long)&ftrace_regs_caller_from_init;
}

void ftrace_arch_code_modify_prepare(void)
{
}

void ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
}

/* Encode a BL from @pc to @addr; @warn controls out-of-range diagnostics. */
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
					 bool warn)
{
	return arm_gen_branch_link(pc, addr, warn);
}

/*
 * Patch the instruction at @pc to @new. When @validate is set, first
 * verify the current instruction equals @old, so a stale or concurrent
 * modification is refused rather than clobbered.
 */
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
		old = __opcode_to_mem_thumb32(old);
	else
		old = __opcode_to_mem_arm(old);

	if (validate) {
		if (copy_from_kernel_nofault(&replaced, (void *)pc,
				MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	__patch_text((void *)pc, new);

	return 0;
}

/* Repoint the ftrace_call (and regs variant) trampoline slots at @func. */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func, true);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func, true);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

/* Turn the NOP at a call-site back into a branch to the trampoline. */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;
	unsigned long aaddr = adjust_address(rec, addr);
	struct module *mod = NULL;

#ifdef CONFIG_ARM_MODULE_PLTS
	mod = rec->arch.mod;
#endif

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, aaddr, !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!new && mod) {
		/* Out of direct branch range: go via the module's PLT. */
		aaddr = get_module_plt(mod, ip, aaddr);
		new = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

/* Swap a call-site from one trampoline to another (regs <-> non-regs). */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);

	new = ftrace_call_replace(ip, adjust_address(rec, addr), true);

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

/* Replace the branch at a call-site with the NOP sequence. */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long aaddr = adjust_address(rec, addr);
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

#ifdef CONFIG_ARM_MODULE_PLTS
	/* mod is only supplied during module loading */
	if (!mod)
		mod = rec->arch.mod;
	else
		rec->arch.mod = mod;
#endif

	old = ftrace_call_replace(ip, aaddr,
				  !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!old && mod) {
		aaddr = get_module_plt(mod, ip, aaddr);
		old = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	new = ftrace_nop_replace(rec);
	/*
	 * Locations in .init.text may call __gnu_mcount_nc via a linker
	 * emitted veneer if they are too far away from its implementation, and
	 * so validation may fail spuriously in such cases. Let's work around
	 * this by omitting those from validation.
	 */
	ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));

	return ret;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Redirect the call-site's return address through return_to_handler so
 * the graph tracer sees function exit; restores the original address if
 * function_graph_enter() rejects the entry.
 */
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer,
			   unsigned long stack_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
		/* FP points one word below parent's top of stack */
		frame_pointer += 4;
	} else {
		struct stackframe frame = {
			.fp = frame_pointer,
			.sp = stack_pointer,
			.lr = self_addr,
			.pc = self_addr,
		};
		if (unwind_frame(&frame) < 0)
			return;
		if (frame.lr != self_addr)
			parent = frame.lr_addr;
		frame_pointer = frame.sp;
	}

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

/* Flip one graph-caller call-site between a NOP and a branch to @func. */
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = arm_gen_nop();
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

/* Enable/disable graph tracing at both trampoline variants. */
static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
				     ftrace_graph_regs_caller,
				     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
linux-master
arch/arm/kernel/ftrace.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/devtree.c * * Copyright (C) 2009 Canonical Ltd. <[email protected]> */ #include <linux/init.h> #include <linux/export.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/memblock.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/smp.h> #include <asm/cputype.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/prom.h> #include <asm/smp_plat.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #ifdef CONFIG_SMP extern struct of_cpu_method __cpu_method_of_table[]; static const struct of_cpu_method __cpu_method_of_table_sentinel __used __section("__cpu_method_of_table_end"); static int __init set_smp_ops_by_method(struct device_node *node) { const char *method; struct of_cpu_method *m = __cpu_method_of_table; if (of_property_read_string(node, "enable-method", &method)) return 0; for (; m->method; m++) if (!strcmp(m->method, method)) { smp_set_ops(m->ops); return 1; } return 0; } #else static inline int set_smp_ops_by_method(struct device_node *node) { return 1; } #endif /* * arm_dt_init_cpu_maps - Function retrieves cpu nodes from the device tree * and builds the cpu logical map array containing MPIDR values related to * logical cpus * * Updates the cpu possible mask with the number of parsed cpu nodes */ void __init arm_dt_init_cpu_maps(void) { /* * Temp logical map is initialized with UINT_MAX values that are * considered invalid logical map entries since the logical map must * contain a list of MPIDR[23:0] values where MPIDR[31:24] must * read as 0. */ struct device_node *cpu, *cpus; int found_method = 0; u32 i, j, cpuidx = 1; u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; u32 tmp_map[NR_CPUS] = { [0 ... 
NR_CPUS-1] = MPIDR_INVALID }; bool bootcpu_valid = false; cpus = of_find_node_by_path("/cpus"); if (!cpus) return; for_each_of_cpu_node(cpu) { u32 hwid = of_get_cpu_hwid(cpu, 0); pr_debug(" * %pOF...\n", cpu); /* * Bits n:24 must be set to 0 in the DT since the reg property * defines the MPIDR[23:0]. */ if (hwid & ~MPIDR_HWID_BITMASK) { of_node_put(cpu); return; } /* * Duplicate MPIDRs are a recipe for disaster. * Scan all initialized entries and check for * duplicates. If any is found just bail out. * temp values were initialized to UINT_MAX * to avoid matching valid MPIDR[23:0] values. */ for (j = 0; j < cpuidx; j++) if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg properties in the DT\n")) { of_node_put(cpu); return; } /* * Build a stashed array of MPIDR values. Numbering scheme * requires that if detected the boot CPU must be assigned * logical id 0. Other CPUs get sequential indexes starting * from 1. If a CPU node with a reg property matching the * boot CPU MPIDR is detected, this is recorded so that the * logical map built from DT is validated and can be used * to override the map created in smp_setup_processor_id(). */ if (hwid == mpidr) { i = 0; bootcpu_valid = true; } else { i = cpuidx++; } if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than " "max cores %u, capping them\n", cpuidx, nr_cpu_ids)) { cpuidx = nr_cpu_ids; of_node_put(cpu); break; } tmp_map[i] = hwid; if (!found_method) found_method = set_smp_ops_by_method(cpu); } /* * Fallback to an enable-method in the cpus node if nothing found in * a cpu node. 
*/ if (!found_method) set_smp_ops_by_method(cpus); if (!bootcpu_valid) { pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n"); return; } /* * Since the boot CPU node contains proper data, and all nodes have * a reg property, the DT CPU list can be considered valid and the * logical map created in smp_setup_processor_id() can be overridden */ for (i = 0; i < cpuidx; i++) { set_cpu_possible(i, true); cpu_logical_map(i) = tmp_map[i]; pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i)); } } bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { return phys_id == cpu_logical_map(cpu); } static const void * __init arch_get_next_mach(const char *const **match) { static const struct machine_desc *mdesc = __arch_info_begin; const struct machine_desc *m = mdesc; if (m >= __arch_info_end) return NULL; mdesc++; *match = m->dt_compat; return m; } /** * setup_machine_fdt - Machine setup when an dtb was passed to the kernel * @dt_virt: virtual address of dt blob * * If a dtb was passed to the kernel in r2, then use it to choose the * correct machine_desc and to setup the system. 
*/ const struct machine_desc * __init setup_machine_fdt(void *dt_virt) { const struct machine_desc *mdesc, *mdesc_best = NULL; DT_MACHINE_START(GENERIC_DT, "Generic DT based system") .l2c_aux_val = 0x0, .l2c_aux_mask = ~0x0, MACHINE_END mdesc_best = &__mach_desc_GENERIC_DT; if (!dt_virt || !early_init_dt_verify(dt_virt)) return NULL; mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach); if (!mdesc) { const char *prop; int size; unsigned long dt_root; early_print("\nError: unrecognized/unsupported " "device tree compatible list:\n[ "); dt_root = of_get_flat_dt_root(); prop = of_get_flat_dt_prop(dt_root, "compatible", &size); while (size > 0) { early_print("'%s' ", prop); size -= strlen(prop) + 1; prop += strlen(prop) + 1; } early_print("]\n\n"); dump_machine_table(); /* does not return */ } /* We really don't want to do this, but sometimes firmware provides buggy data */ if (mdesc->dt_fixup) mdesc->dt_fixup(); early_init_dt_scan_nodes(); /* Change machine number to match the mdesc we're using */ __machine_arch_type = mdesc->nr; return mdesc; }
linux-master
arch/arm/kernel/devtree.c
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2012 ARM Limited * * Author: Will Deacon <[email protected]> */ #include <linux/init.h> #include <linux/smp.h> #include <linux/of.h> #include <linux/delay.h> #include <linux/psci.h> #include <uapi/linux/psci.h> #include <asm/psci.h> #include <asm/smp_plat.h> /* * psci_smp assumes that the following is true about PSCI: * * cpu_suspend Suspend the execution on a CPU * @state we don't currently describe affinity levels, so just pass 0. * @entry_point the first instruction to be executed on return * returns 0 success, < 0 on failure * * cpu_off Power down a CPU * @state we don't currently describe affinity levels, so just pass 0. * no return on successful call * * cpu_on Power up a CPU * @cpuid cpuid of target CPU, as from MPIDR * @entry_point the first instruction to be executed on return * returns 0 success, < 0 on failure * * migrate Migrate the context to a different CPU * @cpuid cpuid of target CPU, as from MPIDR * returns 0 success, < 0 on failure * */ extern void secondary_startup(void); static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) { if (psci_ops.cpu_on) return psci_ops.cpu_on(cpu_logical_map(cpu), virt_to_idmap(&secondary_startup)); return -ENODEV; } #ifdef CONFIG_HOTPLUG_CPU static int psci_cpu_disable(unsigned int cpu) { /* Fail early if we don't have CPU_OFF support */ if (!psci_ops.cpu_off) return -EOPNOTSUPP; /* Trusted OS will deny CPU_OFF */ if (psci_tos_resident_on(cpu)) return -EPERM; return 0; } static void psci_cpu_die(unsigned int cpu) { u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT; if (psci_ops.cpu_off) psci_ops.cpu_off(state); /* We should never return */ panic("psci: cpu %d failed to shutdown\n", cpu); } static int psci_cpu_kill(unsigned int cpu) { int err, i; if (!psci_ops.affinity_info) return 1; /* * cpu_kill could race with cpu_die and we can * potentially end up declaring this cpu undead * while it is dying. 
So, try again a few times. */ for (i = 0; i < 10; i++) { err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { pr_info("CPU%d killed.\n", cpu); return 1; } msleep(10); pr_info("Retrying again to check for CPU kill\n"); } pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", cpu, err); /* Make platform_cpu_kill() fail. */ return 0; } #endif bool __init psci_smp_available(void) { /* is cpu_on available at least? */ return (psci_ops.cpu_on != NULL); } const struct smp_operations psci_smp_ops __initconst = { .smp_boot_secondary = psci_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = psci_cpu_disable, .cpu_die = psci_cpu_die, .cpu_kill = psci_cpu_kill, #endif };
linux-master
arch/arm/kernel/psci_smp.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/pj4-cp0.c * * PJ4 iWMMXt coprocessor context switching and handling * * Copyright (c) 2010 Marvell International Inc. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/io.h> #include <asm/thread_notify.h> #include <asm/cputype.h> static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = t; switch (cmd) { case THREAD_NOTIFY_FLUSH: /* * flush_thread() zeroes thread->fpstate, so no need * to do anything here. * * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. */ case THREAD_NOTIFY_EXIT: iwmmxt_task_release(thread); break; case THREAD_NOTIFY_SWITCH: iwmmxt_task_switch(thread); break; } return NOTIFY_DONE; } static struct notifier_block __maybe_unused iwmmxt_notifier_block = { .notifier_call = iwmmxt_do, }; static u32 __init pj4_cp_access_read(void) { u32 value; __asm__ __volatile__ ( "mrc p15, 0, %0, c1, c0, 2\n\t" : "=r" (value)); return value; } static void __init pj4_cp_access_write(u32 value) { u32 temp; __asm__ __volatile__ ( "mcr p15, 0, %1, c1, c0, 2\n\t" #ifdef CONFIG_THUMB2_KERNEL "isb\n\t" #else "mrc p15, 0, %0, c1, c0, 2\n\t" "mov %0, %0\n\t" "sub pc, pc, #4\n\t" #endif : "=r" (temp) : "r" (value)); } static int __init pj4_get_iwmmxt_version(void) { u32 cp_access, wcid; cp_access = pj4_cp_access_read(); pj4_cp_access_write(cp_access | 0xf); /* check if coprocessor 0 and 1 are available */ if ((pj4_cp_access_read() & 0xf) != 0xf) { pj4_cp_access_write(cp_access); return -ENODEV; } /* read iWMMXt coprocessor id register p1, c0 */ __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid)); pj4_cp_access_write(cp_access); /* iWMMXt v1 */ if ((wcid & 0xffffff00) == 0x56051000) return 1; /* iWMMXt v2 */ if ((wcid & 0xffffff00) == 0x56052000) return 2; return -EINVAL; } /* * Disable 
CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy * switch code handle iWMMXt context switching. */ static int __init pj4_cp0_init(void) { u32 __maybe_unused cp_access; int vers; if (!cpu_is_pj4()) return 0; vers = pj4_get_iwmmxt_version(); if (vers < 0) return 0; #ifndef CONFIG_IWMMXT pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n"); #else cp_access = pj4_cp_access_read() & ~0xf; pj4_cp_access_write(cp_access); pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers); elf_hwcap |= HWCAP_IWMMXT; thread_register_notifier(&iwmmxt_notifier_block); register_iwmmxt_undef_handler(); #endif return 0; } late_initcall(pj4_cp0_init);
linux-master
arch/arm/kernel/pj4-cp0.c
// SPDX-License-Identifier: GPL-2.0 /* * ARMv6 Performance counter handling code. * * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles * * ARMv6 has 2 configurable performance counters and a single cycle counter. * They all share a single reset bit but can be written to zero so we can use * that for a reset. * * The counters can't be individually enabled or disabled so when we remove * one event and replace it with another we could get spurious counts from the * wrong event. However, we can take advantage of the fact that the * performance counters can export events to the event bus, and the event bus * itself can be monitored. This requires that we *don't* export the events to * the event bus. The procedure for disabling a configurable counter is: * - change the counter to count the ETMEXTOUT[0] signal (0x20). This * effectively stops the counter from counting. * - disable the counter's interrupt generation (each counter has it's * own interrupt enable bit). * Once stopped, the counter value can be written as 0 to reset. * * To enable a counter: * - enable the counter's interrupt generation. * - set the new event type. * * Note: the dedicated cycle counter only counts cycles and can't be * enabled/disabled independently of the others. When we want to disable the * cycle counter, we have to just disable the interrupt reporting and start * ignoring that counter. When re-enabling, we have to reset the value and * enable the interrupt. 
*/ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) #include <asm/cputype.h> #include <asm/irq_regs.h> #include <linux/of.h> #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> enum armv6_perf_types { ARMV6_PERFCTR_ICACHE_MISS = 0x0, ARMV6_PERFCTR_IBUF_STALL = 0x1, ARMV6_PERFCTR_DDEP_STALL = 0x2, ARMV6_PERFCTR_ITLB_MISS = 0x3, ARMV6_PERFCTR_DTLB_MISS = 0x4, ARMV6_PERFCTR_BR_EXEC = 0x5, ARMV6_PERFCTR_BR_MISPREDICT = 0x6, ARMV6_PERFCTR_INSTR_EXEC = 0x7, ARMV6_PERFCTR_DCACHE_HIT = 0x9, ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, ARMV6_PERFCTR_DCACHE_MISS = 0xB, ARMV6_PERFCTR_DCACHE_WBACK = 0xC, ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, ARMV6_PERFCTR_WBUF_DRAINED = 0x12, ARMV6_PERFCTR_CPU_CYCLES = 0xFF, ARMV6_PERFCTR_NOP = 0x20, }; enum armv6_counters { ARMV6_CYCLE_COUNTER = 0, ARMV6_COUNTER0, ARMV6_COUNTER1, }; /* * The hardware events that we support. We do support cache operations but * we have harvard caches and no way to combine instruction and data * accesses/misses in hardware. */ static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL, }; static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, /* * The performance counters don't differentiate between read and write * accesses/misses so this isn't strictly correct, but it's the best we * can do. Writes and reads get combined. 
*/ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, /* * The ARM performance counters can count micro DTLB misses, micro ITLB * misses and main TLB misses. There isn't an event for TLB misses, so * use the micro misses here and if users want the main TLB misses they * can use a raw counter. */ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, }; enum armv6mpcore_perf_types { ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, }; /* * The hardware events that we support. We do support cache operations but * we have harvard caches and no way to combine instruction and data * accesses/misses in hardware. 
*/ static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL, }; static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, /* * The ARM performance counters can count micro DTLB misses, micro ITLB * misses and main TLB misses. There isn't an event for TLB misses, so * use the micro misses here and if users want the main TLB misses they * can use a raw counter. 
*/ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, }; static inline unsigned long armv6_pmcr_read(void) { u32 val; asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); return val; } static inline void armv6_pmcr_write(unsigned long val) { asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); } #define ARMV6_PMCR_ENABLE (1 << 0) #define ARMV6_PMCR_CTR01_RESET (1 << 1) #define ARMV6_PMCR_CCOUNT_RESET (1 << 2) #define ARMV6_PMCR_CCOUNT_DIV (1 << 3) #define ARMV6_PMCR_COUNT0_IEN (1 << 4) #define ARMV6_PMCR_COUNT1_IEN (1 << 5) #define ARMV6_PMCR_CCOUNT_IEN (1 << 6) #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) #define ARMV6_PMCR_OVERFLOWED_MASK \ (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ ARMV6_PMCR_CCOUNT_OVERFLOW) static inline int armv6_pmcr_has_overflowed(unsigned long pmcr) { return pmcr & ARMV6_PMCR_OVERFLOWED_MASK; } static inline int armv6_pmcr_counter_has_overflowed(unsigned long pmcr, enum armv6_counters counter) { int ret = 0; if (ARMV6_CYCLE_COUNTER == counter) ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; else if (ARMV6_COUNTER0 == counter) ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; else if (ARMV6_COUNTER1 == counter) ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; else WARN_ONCE(1, "invalid counter number (%d)\n", counter); return ret; } static inline u64 armv6pmu_read_counter(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; unsigned long value = 0; if 
(ARMV6_CYCLE_COUNTER == counter) asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); else if (ARMV6_COUNTER0 == counter) asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); else if (ARMV6_COUNTER1 == counter) asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); else WARN_ONCE(1, "invalid counter number (%d)\n", counter); return value; } static inline void armv6pmu_write_counter(struct perf_event *event, u64 value) { struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; if (ARMV6_CYCLE_COUNTER == counter) asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); else if (ARMV6_COUNTER0 == counter) asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); else if (ARMV6_COUNTER1 == counter) asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value)); else WARN_ONCE(1, "invalid counter number (%d)\n", counter); } static void armv6pmu_enable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { mask = 0; evt = ARMV6_PMCR_CCOUNT_IEN; } else if (ARMV6_COUNTER0 == idx) { mask = ARMV6_PMCR_EVT_COUNT0_MASK; evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ARMV6_PMCR_COUNT0_IEN; } else if (ARMV6_COUNTER1 == idx) { mask = ARMV6_PMCR_EVT_COUNT1_MASK; evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | ARMV6_PMCR_COUNT1_IEN; } else { WARN_ONCE(1, "invalid counter number (%d)\n", idx); return; } /* * Mask out the current event and set the counter to count the event * that we're interested in. 
*/ raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~mask; val |= evt; armv6_pmcr_write(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static irqreturn_t armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) { unsigned long pmcr = armv6_pmcr_read(); struct perf_sample_data data; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; if (!armv6_pmcr_has_overflowed(pmcr)) return IRQ_NONE; regs = get_irq_regs(); /* * The interrupts are cleared by writing the overflow flags back to * the control register. All of the other bits don't have any effect * if they are rewritten, so write the whole value back. */ armv6_pmcr_write(pmcr); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; /* Ignore if we don't have an event. */ if (!event) continue; /* * We have a single interrupt for all counters. Check that * each counter has overflowed before we process it. */ if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) continue; hwc = &event->hw; armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) cpu_pmu->disable(event); } /* * Handle the pending perf events. * * Note: this call *must* be run with interrupts disabled. For * platforms that can have the PMU interrupts raised as an NMI, this * will not work. 
*/ irq_work_run(); return IRQ_HANDLED; } static void armv6pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val |= ARMV6_PMCR_ENABLE; armv6_pmcr_write(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv6pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~ARMV6_PMCR_ENABLE; armv6_pmcr_write(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; /* Always place a cycle counter into the cycle counter. */ if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) { if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; return ARMV6_CYCLE_COUNTER; } else { /* * For anything other than a cycle counter, try and use * counter0 and counter1. */ if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) return ARMV6_COUNTER1; if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) return ARMV6_COUNTER0; /* The counters are all in use. 
*/ return -EAGAIN; } } static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { clear_bit(event->hw.idx, cpuc->used_mask); } static void armv6pmu_disable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { mask = ARMV6_PMCR_CCOUNT_IEN; evt = 0; } else if (ARMV6_COUNTER0 == idx) { mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; } else if (ARMV6_COUNTER1 == idx) { mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; } else { WARN_ONCE(1, "invalid counter number (%d)\n", idx); return; } /* * Mask out the current event and set the counter to count the number * of ETM bus signal assertion cycles. The external reporting should * be disabled and so this should never increment. */ raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~mask; val |= evt; armv6_pmcr_write(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv6mpcore_pmu_disable_event(struct perf_event *event) { unsigned long val, mask, flags, evt = 0; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { mask = ARMV6_PMCR_CCOUNT_IEN; } else if (ARMV6_COUNTER0 == idx) { mask = ARMV6_PMCR_COUNT0_IEN; } else if (ARMV6_COUNTER1 == idx) { mask = ARMV6_PMCR_COUNT1_IEN; } else { WARN_ONCE(1, "invalid counter number (%d)\n", idx); return; } /* * Unlike UP ARMv6, we don't have a way of stopping the counters. We * simply disable the interrupt reporting. 
*/ raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~mask; val |= evt; armv6_pmcr_write(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int armv6_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv6_perf_map, &armv6_perf_cache_map, 0xFF); } static void armv6pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->handle_irq = armv6pmu_handle_irq; cpu_pmu->enable = armv6pmu_enable_event; cpu_pmu->disable = armv6pmu_disable_event; cpu_pmu->read_counter = armv6pmu_read_counter; cpu_pmu->write_counter = armv6pmu_write_counter; cpu_pmu->get_event_idx = armv6pmu_get_event_idx; cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx; cpu_pmu->start = armv6pmu_start; cpu_pmu->stop = armv6pmu_stop; cpu_pmu->map_event = armv6_map_event; cpu_pmu->num_events = 3; } static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu) { armv6pmu_init(cpu_pmu); cpu_pmu->name = "armv6_1136"; return 0; } static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu) { armv6pmu_init(cpu_pmu); cpu_pmu->name = "armv6_1156"; return 0; } static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu) { armv6pmu_init(cpu_pmu); cpu_pmu->name = "armv6_1176"; return 0; } /* * ARMv6mpcore is almost identical to single core ARMv6 with the exception * that some of the events have different enumerations and that there is no * *hack* to stop the programmable counters. To stop the counters we simply * disable the interrupt reporting and update the event. When unthrottling we * reset the period and enable the interrupt reporting. 
*/ static int armv6mpcore_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv6mpcore_perf_map, &armv6mpcore_perf_cache_map, 0xFF); } static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "armv6_11mpcore"; cpu_pmu->handle_irq = armv6pmu_handle_irq; cpu_pmu->enable = armv6pmu_enable_event; cpu_pmu->disable = armv6mpcore_pmu_disable_event; cpu_pmu->read_counter = armv6pmu_read_counter; cpu_pmu->write_counter = armv6pmu_write_counter; cpu_pmu->get_event_idx = armv6pmu_get_event_idx; cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx; cpu_pmu->start = armv6pmu_start; cpu_pmu->stop = armv6pmu_stop; cpu_pmu->map_event = armv6mpcore_map_event; cpu_pmu->num_events = 3; return 0; } static const struct of_device_id armv6_pmu_of_device_ids[] = { {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init}, {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init}, {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init}, { /* sentinel value */ } }; static const struct pmu_probe_info armv6_pmu_probe_table[] = { ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init), ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init), ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init), ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init), { /* sentinel value */ } }; static int armv6_pmu_device_probe(struct platform_device *pdev) { return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids, armv6_pmu_probe_table); } static struct platform_driver armv6_pmu_driver = { .driver = { .name = "armv6-pmu", .of_match_table = armv6_pmu_of_device_ids, }, .probe = armv6_pmu_device_probe, }; builtin_platform_driver(armv6_pmu_driver); #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
linux-master
arch/arm/kernel/perf_event_v6.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/early_printk.c * * Copyright (C) 2009 Sascha Hauer <[email protected]> */ #include <linux/kernel.h> #include <linux/console.h> #include <linux/init.h> #include <linux/string.h> extern void printascii(const char *); static void early_write(const char *s, unsigned n) { char buf[128]; while (n) { unsigned l = min(n, sizeof(buf)-1); memcpy(buf, s, l); buf[l] = 0; s += l; n -= l; printascii(buf); } } static void early_console_write(struct console *con, const char *s, unsigned n) { early_write(s, n); } static struct console early_console_dev = { .name = "earlycon", .write = early_console_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; static int __init setup_early_printk(char *buf) { early_console = &early_console_dev; register_console(&early_console_dev); return 0; } early_param("earlyprintk", setup_early_printk);
linux-master
arch/arm/kernel/early_printk.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/smp_twd.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/cpu.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/smp.h> #include <linux/jiffies.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <asm/smp_twd.h> /* set up by the platform code */ static void __iomem *twd_base; static struct clk *twd_clk; static unsigned long twd_timer_rate; static DEFINE_PER_CPU(bool, percpu_setup_called); static struct clock_event_device __percpu *twd_evt; static unsigned int twd_features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; static int twd_ppi; static int twd_shutdown(struct clock_event_device *clk) { writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); return 0; } static int twd_set_oneshot(struct clock_event_device *clk) { /* period set, and timer enabled in 'next_event' hook */ writel_relaxed(TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT, twd_base + TWD_TIMER_CONTROL); return 0; } static int twd_set_periodic(struct clock_event_device *clk) { unsigned long ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_PERIODIC; writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), twd_base + TWD_TIMER_LOAD); writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL); return 0; } static int twd_set_next_event(unsigned long evt, struct clock_event_device *unused) { unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL); ctrl |= TWD_TIMER_CONTROL_ENABLE; writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER); writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL); return 0; } /* * local_timer_ack: checks for a local timer interrupt. * * If a local timer interrupt has occurred, acknowledge and return 1. * Otherwise, return 0. 
*/ static int twd_timer_ack(void) { if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) { writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT); return 1; } return 0; } static void twd_timer_stop(void) { struct clock_event_device *clk = raw_cpu_ptr(twd_evt); twd_shutdown(clk); disable_percpu_irq(clk->irq); } /* * Updates clockevent frequency when the cpu frequency changes. * Called on the cpu that is changing frequency with interrupts disabled. */ static void twd_update_frequency(void *new_rate) { twd_timer_rate = *((unsigned long *) new_rate); clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate); } static int twd_rate_change(struct notifier_block *nb, unsigned long flags, void *data) { struct clk_notifier_data *cnd = data; /* * The twd clock events must be reprogrammed to account for the new * frequency. The timer is local to a cpu, so cross-call to the * changing cpu. */ if (flags == POST_RATE_CHANGE) on_each_cpu(twd_update_frequency, (void *)&cnd->new_rate, 1); return NOTIFY_OK; } static struct notifier_block twd_clk_nb = { .notifier_call = twd_rate_change, }; static int twd_clk_init(void) { if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) return clk_notifier_register(twd_clk, &twd_clk_nb); return 0; } core_initcall(twd_clk_init); static void twd_calibrate_rate(void) { unsigned long count; u64 waitjiffies; /* * If this is the first time round, we need to work out how fast * the timer ticks */ if (twd_timer_rate == 0) { pr_info("Calibrating local timer... 
"); /* Wait for a tick to start */ waitjiffies = get_jiffies_64() + 1; while (get_jiffies_64() < waitjiffies) udelay(10); /* OK, now the tick has started, let's get the timer going */ waitjiffies += 5; /* enable, no interrupt or reload */ writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL); /* maximum value */ writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER); while (get_jiffies_64() < waitjiffies) udelay(10); count = readl_relaxed(twd_base + TWD_TIMER_COUNTER); twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); pr_cont("%lu.%02luMHz.\n", twd_timer_rate / 1000000, (twd_timer_rate / 10000) % 100); } } static irqreturn_t twd_handler(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; if (twd_timer_ack()) { evt->event_handler(evt); return IRQ_HANDLED; } return IRQ_NONE; } static void twd_get_clock(struct device_node *np) { int err; if (np) twd_clk = of_clk_get(np, 0); else twd_clk = clk_get_sys("smp_twd", NULL); if (IS_ERR(twd_clk)) { pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk)); return; } err = clk_prepare_enable(twd_clk); if (err) { pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); clk_put(twd_clk); return; } twd_timer_rate = clk_get_rate(twd_clk); } /* * Setup the local clock events for a CPU. */ static void twd_timer_setup(void) { struct clock_event_device *clk = raw_cpu_ptr(twd_evt); int cpu = smp_processor_id(); /* * If the basic setup for this CPU has been done before don't * bother with the below. */ if (per_cpu(percpu_setup_called, cpu)) { writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); clockevents_register_device(clk); enable_percpu_irq(clk->irq, 0); return; } per_cpu(percpu_setup_called, cpu) = true; twd_calibrate_rate(); /* * The following is done once per CPU the first time .setup() is * called. 
*/ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); clk->name = "local_timer"; clk->features = twd_features; clk->rating = 350; clk->set_state_shutdown = twd_shutdown; clk->set_state_periodic = twd_set_periodic; clk->set_state_oneshot = twd_set_oneshot; clk->tick_resume = twd_shutdown; clk->set_next_event = twd_set_next_event; clk->irq = twd_ppi; clk->cpumask = cpumask_of(cpu); clockevents_config_and_register(clk, twd_timer_rate, 0xf, 0xffffffff); enable_percpu_irq(clk->irq, 0); } static int twd_timer_starting_cpu(unsigned int cpu) { twd_timer_setup(); return 0; } static int twd_timer_dying_cpu(unsigned int cpu) { twd_timer_stop(); return 0; } static int __init twd_local_timer_common_register(struct device_node *np) { int err; twd_evt = alloc_percpu(struct clock_event_device); if (!twd_evt) { err = -ENOMEM; goto out_free; } err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt); if (err) { pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err); goto out_free; } cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING, "arm/timer/twd:starting", twd_timer_starting_cpu, twd_timer_dying_cpu); twd_get_clock(np); if (!of_property_read_bool(np, "always-on")) twd_features |= CLOCK_EVT_FEAT_C3STOP; /* * Immediately configure the timer on the boot CPU, unless we need * jiffies to be incrementing to calibrate the rate in which case * setup the timer in late_time_init. 
*/ if (twd_timer_rate) twd_timer_setup(); else late_time_init = twd_timer_setup; return 0; out_free: iounmap(twd_base); twd_base = NULL; free_percpu(twd_evt); return err; } static int __init twd_local_timer_of_register(struct device_node *np) { int err; twd_ppi = irq_of_parse_and_map(np, 0); if (!twd_ppi) { err = -EINVAL; goto out; } twd_base = of_iomap(np, 0); if (!twd_base) { err = -ENOMEM; goto out; } err = twd_local_timer_common_register(np); out: WARN(err, "twd_local_timer_of_register failed (%d)\n", err); return err; } TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register); TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register); TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
linux-master
arch/arm/kernel/smp_twd.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/kernel/thumbee.c * * Copyright (C) 2008 ARM Limited */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/cputype.h> #include <asm/system_info.h> #include <asm/thread_notify.h> /* * Access to the ThumbEE Handler Base register */ static inline unsigned long teehbr_read(void) { unsigned long v; asm("mrc p14, 6, %0, c1, c0, 0\n" : "=r" (v)); return v; } static inline void teehbr_write(unsigned long v) { asm("mcr p14, 6, %0, c1, c0, 0\n" : : "r" (v)); } static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = t; switch (cmd) { case THREAD_NOTIFY_FLUSH: teehbr_write(0); break; case THREAD_NOTIFY_SWITCH: current_thread_info()->thumbee_state = teehbr_read(); teehbr_write(thread->thumbee_state); break; } return NOTIFY_DONE; } static struct notifier_block thumbee_notifier_block = { .notifier_call = thumbee_notifier, }; static int __init thumbee_init(void) { unsigned long pfr0; unsigned int cpu_arch = cpu_architecture(); if (cpu_arch < CPU_ARCH_ARMv7) return 0; pfr0 = read_cpuid_ext(CPUID_EXT_PFR0); if ((pfr0 & 0x0000f000) != 0x00001000) return 0; pr_info("ThumbEE CPU extension supported.\n"); elf_hwcap |= HWCAP_THUMBEE; thread_register_notifier(&thumbee_notifier_block); return 0; } late_initcall(thumbee_init);
linux-master
arch/arm/kernel/thumbee.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/arm/kernel/fiq.c * * Copyright (C) 1998 Russell King * Copyright (C) 1998, 1999 Phil Blundell * * FIQ support written by Philip Blundell <[email protected]>, 1998. * * FIQ support re-written by Russell King to be more generic * * We now properly support a method by which the FIQ handlers can * be stacked onto the vector. We still do not support sharing * the FIQ vector itself. * * Operation is as follows: * 1. Owner A claims FIQ: * - default_fiq relinquishes control. * 2. Owner A: * - inserts code. * - sets any registers, * - enables FIQ. * 3. Owner B claims FIQ: * - if owner A has a relinquish function. * - disable FIQs. * - saves any registers. * - returns zero. * 4. Owner B: * - inserts code. * - sets any registers, * - enables FIQ. * 5. Owner B releases FIQ: * - Owner A is asked to reacquire FIQ: * - inserts code. * - restores saved registers. * - enables FIQ. * 6. Goto 3 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/seq_file.h> #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/fiq.h> #include <asm/mach/irq.h> #include <asm/irq.h> #include <asm/traps.h> #define FIQ_OFFSET ({ \ extern void *vector_fiq_offset; \ (unsigned)&vector_fiq_offset; \ }) static unsigned long dfl_fiq_insn; static struct pt_regs dfl_fiq_regs; /* Default reacquire function * - we always relinquish FIQ control * - we always reacquire FIQ control */ static int fiq_def_op(void *ref, int relinquish) { if (!relinquish) { /* Restore default handler and registers */ local_fiq_disable(); set_fiq_regs(&dfl_fiq_regs); set_fiq_handler(&dfl_fiq_insn, sizeof(dfl_fiq_insn)); local_fiq_enable(); /* FIXME: notify irq controller to standard enable FIQs */ } return 0; } static struct fiq_handler default_owner = { .name = "default", .fiq_op = fiq_def_op, }; static struct fiq_handler *current_fiq = &default_owner; int show_fiq_list(struct seq_file *p, int prec) { if 
(current_fiq != &default_owner) seq_printf(p, "%*s: %s\n", prec, "FIQ", current_fiq->name); return 0; } void set_fiq_handler(void *start, unsigned int length) { void *base = vectors_page; unsigned offset = FIQ_OFFSET; memcpy(base + offset, start, length); if (!cache_is_vipt_nonaliasing()) flush_icache_range((unsigned long)base + offset, (unsigned long)base + offset + length); flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); } int claim_fiq(struct fiq_handler *f) { int ret = 0; if (current_fiq) { ret = -EBUSY; if (current_fiq->fiq_op != NULL) ret = current_fiq->fiq_op(current_fiq->dev_id, 1); } if (!ret) { f->next = current_fiq; current_fiq = f; } return ret; } void release_fiq(struct fiq_handler *f) { if (current_fiq != f) { pr_err("%s FIQ trying to release %s FIQ\n", f->name, current_fiq->name); dump_stack(); return; } do current_fiq = current_fiq->next; while (current_fiq->fiq_op(current_fiq->dev_id, 0)); } static int fiq_start; void enable_fiq(int fiq) { enable_irq(fiq + fiq_start); } void disable_fiq(int fiq) { disable_irq(fiq + fiq_start); } EXPORT_SYMBOL(set_fiq_handler); EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */ EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */ EXPORT_SYMBOL(claim_fiq); EXPORT_SYMBOL(release_fiq); EXPORT_SYMBOL(enable_fiq); EXPORT_SYMBOL(disable_fiq); void __init init_FIQ(int start) { unsigned offset = FIQ_OFFSET; dfl_fiq_insn = *(unsigned long *)(0xffff0000 + offset); get_fiq_regs(&dfl_fiq_regs); fiq_start = start; }
linux-master
arch/arm/kernel/fiq.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/process.c * * Copyright (C) 1996-2000 Russell King - Converted to ARM. * Original Copyright (C) 1995 Linus Torvalds */ #include <linux/export.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/elfcore.h> #include <linux/pm.h> #include <linux/tick.h> #include <linux/utsname.h> #include <linux/uaccess.h> #include <linux/random.h> #include <linux/hw_breakpoint.h> #include <linux/leds.h> #include <asm/processor.h> #include <asm/thread_notify.h> #include <asm/stacktrace.h> #include <asm/system_misc.h> #include <asm/mach/time.h> #include <asm/tls.h> #include <asm/vdso.h> #include "signal.h" #if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP) DEFINE_PER_CPU(struct task_struct *, __entry_task); #endif #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif #ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO asmlinkage struct task_struct *__current; EXPORT_SYMBOL(__current); #endif static const char *processor_modes[] __maybe_unused = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" , "UK8_32" , "UK9_32" , "HYP_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" }; static const char *isa_modes[] __maybe_unused = { "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; /* * This is our default idle handler. */ void (*arm_pm_idle)(void); /* * Called from the core idle loop. 
*/ void arch_cpu_idle(void) { if (arm_pm_idle) arm_pm_idle(); else cpu_do_idle(); } void arch_cpu_idle_prepare(void) { local_fiq_enable(); } void arch_cpu_idle_enter(void) { ledtrig_cpu(CPU_LED_IDLE_START); #ifdef CONFIG_PL310_ERRATA_769419 wmb(); #endif } void arch_cpu_idle_exit(void) { ledtrig_cpu(CPU_LED_IDLE_END); } void __show_regs_alloc_free(struct pt_regs *regs) { int i; /* check for r0 - r12 only */ for (i = 0; i < 13; i++) { pr_alert("Register r%d information:", i); mem_dump_obj((void *)regs->uregs[i]); } } void __show_regs(struct pt_regs *regs) { unsigned long flags; char buf[64]; #ifndef CONFIG_CPU_V7M unsigned int domain; #ifdef CONFIG_CPU_SW_DOMAIN_PAN /* * Get the domain register for the parent context. In user * mode, we don't save the DACR, so lets use what it should * be. For other modes, we place it after the pt_regs struct. */ if (user_mode(regs)) { domain = DACR_UACCESS_ENABLE; } else { domain = to_svc_pt_regs(regs)->dacr; } #else domain = get_domain(); #endif #endif show_regs_print_info(KERN_DEFAULT); printk("PC is at %pS\n", (void *)instruction_pointer(regs)); printk("LR is at %pS\n", (void *)regs->ARM_lr); printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n", regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr); printk("sp : %08lx ip : %08lx fp : %08lx\n", regs->ARM_sp, regs->ARM_ip, regs->ARM_fp); printk("r10: %08lx r9 : %08lx r8 : %08lx\n", regs->ARM_r10, regs->ARM_r9, regs->ARM_r8); printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4); printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0); flags = regs->ARM_cpsr; buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; buf[3] = flags & PSR_V_BIT ? 
'V' : 'v'; buf[4] = '\0'; #ifndef CONFIG_CPU_V7M { const char *segment; if ((domain & domain_mask(DOMAIN_USER)) == domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) segment = "none"; else segment = "user"; printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", buf, interrupts_enabled(regs) ? "n" : "ff", fast_interrupts_enabled(regs) ? "n" : "ff", processor_modes[processor_mode(regs)], isa_modes[isa_mode(regs)], segment); } #else printk("xPSR: %08lx\n", regs->ARM_cpsr); #endif #ifdef CONFIG_CPU_CP15 { unsigned int ctrl; buf[0] = '\0'; #ifdef CONFIG_CPU_CP15_MMU { unsigned int transbase; asm("mrc p15, 0, %0, c2, c0\n\t" : "=r" (transbase)); snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", transbase, domain); } #endif asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); printk("Control: %08x%s\n", ctrl, buf); } #endif } void show_regs(struct pt_regs * regs) { __show_regs(regs); dump_backtrace(regs, NULL, KERN_DEFAULT); } ATOMIC_NOTIFIER_HEAD(thread_notify_head); EXPORT_SYMBOL_GPL(thread_notify_head); /* * Free current thread data structures etc.. 
*/ void exit_thread(struct task_struct *tsk) { thread_notify(THREAD_NOTIFY_EXIT, task_thread_info(tsk)); } void flush_thread(void) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; flush_ptrace_hw_breakpoint(tsk); memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); flush_tls(); thread_notify(THREAD_NOTIFY_FLUSH, thread); } asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long clone_flags = args->flags; unsigned long stack_start = args->stack; unsigned long tls = args->tls; struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); #ifdef CONFIG_CPU_USE_DOMAINS /* * Copy the initial value of the domain access control register * from the current thread: thread->addr_limit will have been * copied from the current thread via setup_thread_stack() in * kernel/fork.c */ thread->cpu_domain = get_domain(); #endif if (likely(!args->fn)) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) childregs->ARM_sp = stack_start; } else { memset(childregs, 0, sizeof(struct pt_regs)); thread->cpu_context.r4 = (unsigned long)args->fn_arg; thread->cpu_context.r5 = (unsigned long)args->fn; childregs->ARM_cpsr = SVC_MODE; } thread->cpu_context.pc = (unsigned long)ret_from_fork; thread->cpu_context.sp = (unsigned long)childregs; clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) thread->tp_value[0] = tls; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); return 0; } unsigned long __get_wchan(struct task_struct *p) { struct stackframe frame; unsigned long stack_page; int count = 0; frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(p); stack_page = 
(unsigned long)task_stack_page(p); do { if (frame.sp < stack_page || frame.sp >= stack_page + THREAD_SIZE || unwind_frame(&frame) < 0) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; } while (count ++ < 16); return 0; } #ifdef CONFIG_MMU #ifdef CONFIG_KUSER_HELPERS /* * The vectors page is always readable from user space for the * atomic helpers. Insert it into the gate_vma so that it is visible * through ptrace and /proc/<pid>/mem. */ static struct vm_area_struct gate_vma; static int __init gate_vma_init(void) { vma_init(&gate_vma, NULL); gate_vma.vm_page_prot = PAGE_READONLY_EXEC; gate_vma.vm_start = 0xffff0000; gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; vm_flags_init(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC); return 0; } arch_initcall(gate_vma_init); struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return &gate_vma; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); } int in_gate_area_no_mm(unsigned long addr) { return in_gate_area(NULL, addr); } #define is_gate_vma(vma) ((vma) == &gate_vma) #else #define is_gate_vma(vma) 0 #endif const char *arch_vma_name(struct vm_area_struct *vma) { return is_gate_vma(vma) ? "[vectors]" : NULL; } /* If possible, provide a placement hint at a random offset from the * stack for the sigpage and vdso pages. */ static unsigned long sigpage_addr(const struct mm_struct *mm, unsigned int npages) { unsigned long offset; unsigned long first; unsigned long last; unsigned long addr; unsigned int slots; first = PAGE_ALIGN(mm->start_stack); last = TASK_SIZE - (npages << PAGE_SHIFT); /* No room after stack? */ if (first > last) return 0; /* Just enough room? 
*/ if (first == last) return first; slots = ((last - first) >> PAGE_SHIFT) + 1; offset = get_random_u32_below(slots); addr = first + (offset << PAGE_SHIFT); return addr; } static struct page *signal_page; extern struct page *get_signal_page(void); static int sigpage_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { current->mm->context.sigpage = new_vma->vm_start; return 0; } static const struct vm_special_mapping sigpage_mapping = { .name = "[sigpage]", .pages = &signal_page, .mremap = sigpage_mremap, }; int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long npages; unsigned long addr; unsigned long hint; int ret = 0; if (!signal_page) signal_page = get_signal_page(); if (!signal_page) return -ENOMEM; npages = 1; /* for sigpage */ npages += vdso_total_pages; if (mmap_write_lock_killable(mm)) return -EINTR; hint = sigpage_addr(mm, npages); addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, &sigpage_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto up_fail; } mm->context.sigpage = addr; /* Unlike the sigpage, failure to install the vdso is unlikely * to be fatal to the process, so no error check needed * here. */ arm_install_vdso(mm, addr + PAGE_SIZE); up_fail: mmap_write_unlock(mm); return ret; } #endif
linux-master
arch/arm/kernel/process.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/sys_arm.c * * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c * Copyright (C) 1995, 1996 Russell King. * * This file contains various random system calls that * have a non-standard calling sequence on the Linux/arm * platform. */ #include <linux/export.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/ipc.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <asm/syscalls.h> /* * Since loff_t is a 64 bit type we avoid a lot of ABI hassle * with a different argument ordering. */ asmlinkage long sys_arm_fadvise64_64(int fd, int advice, loff_t offset, loff_t len) { return ksys_fadvise64_64(fd, offset, len, advice); }
linux-master
arch/arm/kernel/sys_arm.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/kprobes.h> #include <linux/mm.h> #include <linux/stop_machine.h> #include <asm/cacheflush.h> #include <asm/fixmap.h> #include <asm/smp_plat.h> #include <asm/opcodes.h> #include <asm/patch.h> struct patch { void *addr; unsigned int insn; }; #ifdef CONFIG_MMU static DEFINE_RAW_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) { unsigned int uintaddr = (uintptr_t) addr; bool module = !core_kernel_text(uintaddr); struct page *page; if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) page = vmalloc_to_page(addr); else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) page = virt_to_page(addr); else return addr; if (flags) raw_spin_lock_irqsave(&patch_lock, *flags); set_fixmap(fixmap, page_to_phys(page)); return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); } static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { clear_fixmap(fixmap); if (flags) raw_spin_unlock_irqrestore(&patch_lock, *flags); } #else static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) { return addr; } static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { } #endif void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap) { bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); unsigned int uintaddr = (uintptr_t) addr; bool twopage = false; unsigned long flags; void *waddr = addr; int size; if (remap) waddr = patch_map(addr, FIX_TEXT_POKE0, &flags); if (thumb2 && __opcode_is_thumb16(insn)) { *(u16 *)waddr = __opcode_to_mem_thumb16(insn); size = sizeof(u16); } else if (thumb2 && (uintaddr & 2)) { u16 first = __opcode_thumb32_first(insn); u16 second = __opcode_thumb32_second(insn); u16 *addrh0 = waddr; u16 *addrh1 = waddr + 2; twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2; if (twopage && remap) addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL); *addrh0 = 
__opcode_to_mem_thumb16(first); *addrh1 = __opcode_to_mem_thumb16(second); if (twopage && addrh1 != addr + 2) { flush_kernel_vmap_range(addrh1, 2); patch_unmap(FIX_TEXT_POKE1, NULL); } size = sizeof(u32); } else { if (thumb2) insn = __opcode_to_mem_thumb32(insn); else insn = __opcode_to_mem_arm(insn); *(u32 *)waddr = insn; size = sizeof(u32); } if (waddr != addr) { flush_kernel_vmap_range(waddr, twopage ? size / 2 : size); patch_unmap(FIX_TEXT_POKE0, &flags); } flush_icache_range((uintptr_t)(addr), (uintptr_t)(addr) + size); } static int __kprobes patch_text_stop_machine(void *data) { struct patch *patch = data; __patch_text(patch->addr, patch->insn); return 0; } void __kprobes patch_text(void *addr, unsigned int insn) { struct patch patch = { .addr = addr, .insn = insn, }; stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL); }
linux-master
arch/arm/kernel/patch.c