python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0-only /* * Smart reflex Class 3 specific implementations * * Author: Thara Gopinath <[email protected]> * * Copyright (C) 2010 Texas Instruments, Inc. * Thara Gopinath <[email protected]> */ #include <linux/power/smartreflex.h> #include "soc.h" #include "voltage.h" static int sr_class3_enable(struct omap_sr *sr) { unsigned long volt = voltdm_get_voltage(sr->voltdm); if (!volt) { pr_warn("%s: Curr voltage unknown. Cannot enable %s\n", __func__, sr->name); return -ENODATA; } omap_vp_enable(sr->voltdm); return sr_enable(sr, volt); } static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) { sr_disable_errgen(sr); omap_vp_disable(sr->voltdm); sr_disable(sr); if (is_volt_reset) voltdm_reset(sr->voltdm); return 0; } static int sr_class3_configure(struct omap_sr *sr) { return sr_configure_errgen(sr); } /* SR class3 structure */ static struct omap_sr_class_data class3_data = { .enable = sr_class3_enable, .disable = sr_class3_disable, .configure = sr_class3_configure, .class_type = SR_CLASS3, }; /* Smartreflex Class3 init API to be called from board file */ static int __init sr_class3_init(void) { pr_info("SmartReflex Class3 initialized\n"); return sr_register_class(&class3_data); } omap_late_initcall(sr_class3_init);
linux-master
arch/arm/mach-omap2/smartreflex-class3.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP IOMMU quirks for various TI SoCs * * Copyright (C) 2015-2019 Texas Instruments Incorporated - https://www.ti.com/ * Suman Anna <[email protected]> */ #include <linux/platform_device.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/list.h> #include "clockdomain.h" #include "powerdomain.h" #include "common.h" struct pwrdm_link { struct device *dev; struct powerdomain *pwrdm; struct list_head node; }; static DEFINE_SPINLOCK(iommu_lock); static struct clockdomain *emu_clkdm; static atomic_t emu_count; static void omap_iommu_dra7_emu_swsup_config(struct platform_device *pdev, bool enable) { struct device_node *np = pdev->dev.of_node; unsigned long flags; if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu")) return; if (!emu_clkdm) { emu_clkdm = clkdm_lookup("emu_clkdm"); if (WARN_ON_ONCE(!emu_clkdm)) return; } spin_lock_irqsave(&iommu_lock, flags); if (enable && (atomic_inc_return(&emu_count) == 1)) clkdm_deny_idle(emu_clkdm); else if (!enable && (atomic_dec_return(&emu_count) == 0)) clkdm_allow_idle(emu_clkdm); spin_unlock_irqrestore(&iommu_lock, flags); } static struct powerdomain *_get_pwrdm(struct device *dev) { struct clk *clk; struct clk_hw_omap *hwclk; struct clockdomain *clkdm; struct powerdomain *pwrdm = NULL; struct pwrdm_link *entry; unsigned long flags; static LIST_HEAD(cache); spin_lock_irqsave(&iommu_lock, flags); list_for_each_entry(entry, &cache, node) { if (entry->dev == dev) { pwrdm = entry->pwrdm; break; } } spin_unlock_irqrestore(&iommu_lock, flags); if (pwrdm) return pwrdm; clk = of_clk_get(dev->of_node->parent, 0); if (IS_ERR(clk)) { dev_err(dev, "no fck found\n"); return NULL; } hwclk = to_clk_hw_omap(__clk_get_hw(clk)); clk_put(clk); if (!hwclk || !hwclk->clkdm_name) { dev_err(dev, "no hwclk data\n"); return NULL; } clkdm = clkdm_lookup(hwclk->clkdm_name); if (!clkdm) { dev_err(dev, "clkdm not found: %s\n", hwclk->clkdm_name); return NULL; } pwrdm = clkdm_get_pwrdm(clkdm); if 
(!pwrdm) { dev_err(dev, "pwrdm not found: %s\n", clkdm->name); return NULL; } entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (entry) { entry->dev = dev; entry->pwrdm = pwrdm; spin_lock_irqsave(&iommu_lock, flags); list_add(&entry->node, &cache); spin_unlock_irqrestore(&iommu_lock, flags); } return pwrdm; } int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, u8 *pwrst) { struct powerdomain *pwrdm; u8 next_pwrst; int ret = 0; pwrdm = _get_pwrdm(&pdev->dev); if (!pwrdm) return -ENODEV; if (request) { *pwrst = pwrdm_read_next_pwrst(pwrdm); omap_iommu_dra7_emu_swsup_config(pdev, true); } if (*pwrst > PWRDM_POWER_RET) goto out; next_pwrst = request ? PWRDM_POWER_ON : *pwrst; ret = pwrdm_set_next_pwrst(pwrdm, next_pwrst); out: if (!request) omap_iommu_dra7_emu_swsup_config(pdev, false); return ret; }
linux-master
arch/arm/mach-omap2/omap-iommu.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP4 PRM module functions * * Copyright (C) 2011-2012 Texas Instruments, Inc. * Copyright (C) 2010 Nokia Corporation * Benoît Cousson * Paul Walmsley * Rajendra Nayak <[email protected]> */ #include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of_irq.h> #include <linux/of.h> #include "soc.h" #include "iomap.h" #include "common.h" #include "vp.h" #include "prm44xx.h" #include "prcm43xx.h" #include "prm-regbits-44xx.h" #include "prcm44xx.h" #include "prminst44xx.h" #include "powerdomain.h" #include "pm.h" /* Static data */ static void omap44xx_prm_read_pending_irqs(unsigned long *events); static void omap44xx_prm_ocp_barrier(void); static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask); static void omap44xx_prm_restore_irqen(u32 *saved_mask); static void omap44xx_prm_reconfigure_io_chain(void); static const struct omap_prcm_irq omap4_prcm_irqs[] = { OMAP_PRCM_IRQ("io", 9, 1), }; static struct omap_prcm_irq_setup omap4_prcm_irq_setup = { .ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET, .pm_ctrl = OMAP4_PRM_IO_PMCTRL_OFFSET, .nr_regs = 2, .irqs = omap4_prcm_irqs, .nr_irqs = ARRAY_SIZE(omap4_prcm_irqs), .read_pending_irqs = &omap44xx_prm_read_pending_irqs, .ocp_barrier = &omap44xx_prm_ocp_barrier, .save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen, .restore_irqen = &omap44xx_prm_restore_irqen, .reconfigure_io_chain = &omap44xx_prm_reconfigure_io_chain, }; struct omap_prm_irq_context { unsigned long irq_enable; unsigned long pm_ctrl; }; static struct omap_prm_irq_context omap_prm_context; /* * omap44xx_prm_reset_src_map - map from bits in the PRM_RSTST * hardware register (which are specific to OMAP44xx SoCs) to reset * source ID bit shifts (which is an OMAP SoC-independent * enumeration) */ static struct prm_reset_src_map omap44xx_prm_reset_src_map[] = { { 
OMAP4430_GLOBAL_WARM_SW_RST_SHIFT, OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT }, { OMAP4430_GLOBAL_COLD_RST_SHIFT, OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT }, { OMAP4430_MPU_SECURITY_VIOL_RST_SHIFT, OMAP_SECU_VIOL_RST_SRC_ID_SHIFT }, { OMAP4430_MPU_WDT_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT }, { OMAP4430_SECURE_WDT_RST_SHIFT, OMAP_SECU_WD_RST_SRC_ID_SHIFT }, { OMAP4430_EXTERNAL_WARM_RST_SHIFT, OMAP_EXTWARM_RST_SRC_ID_SHIFT }, { OMAP4430_VDD_MPU_VOLT_MGR_RST_SHIFT, OMAP_VDD_MPU_VM_RST_SRC_ID_SHIFT }, { OMAP4430_VDD_IVA_VOLT_MGR_RST_SHIFT, OMAP_VDD_IVA_VM_RST_SRC_ID_SHIFT }, { OMAP4430_VDD_CORE_VOLT_MGR_RST_SHIFT, OMAP_VDD_CORE_VM_RST_SRC_ID_SHIFT }, { OMAP4430_ICEPICK_RST_SHIFT, OMAP_ICEPICK_RST_SRC_ID_SHIFT }, { OMAP4430_C2C_RST_SHIFT, OMAP_C2C_RST_SRC_ID_SHIFT }, { -1, -1 }, }; /* PRM low-level functions */ /* Read a register in a CM/PRM instance in the PRM module */ static u32 omap4_prm_read_inst_reg(s16 inst, u16 reg) { return readl_relaxed(prm_base.va + inst + reg); } /* Write into a register in a CM/PRM instance in the PRM module */ static void omap4_prm_write_inst_reg(u32 val, s16 inst, u16 reg) { writel_relaxed(val, prm_base.va + inst + reg); } /* Read-modify-write a register in a PRM module. Caller must lock */ static u32 omap4_prm_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 reg) { u32 v; v = omap4_prm_read_inst_reg(inst, reg); v &= ~mask; v |= bits; omap4_prm_write_inst_reg(v, inst, reg); return v; } /* PRM VP */ /* * struct omap4_vp - OMAP4 VP register access description. 
* @irqstatus_mpu: offset to IRQSTATUS_MPU register for VP * @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg */ struct omap4_vp { u32 irqstatus_mpu; u32 tranxdone_status; }; static struct omap4_vp omap4_vp[] = { [OMAP4_VP_VDD_MPU_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET, .tranxdone_status = OMAP4430_VP_MPU_TRANXDONE_ST_MASK, }, [OMAP4_VP_VDD_IVA_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .tranxdone_status = OMAP4430_VP_IVA_TRANXDONE_ST_MASK, }, [OMAP4_VP_VDD_CORE_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .tranxdone_status = OMAP4430_VP_CORE_TRANXDONE_ST_MASK, }, }; static u32 omap4_prm_vp_check_txdone(u8 vp_id) { struct omap4_vp *vp = &omap4_vp[vp_id]; u32 irqstatus; irqstatus = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, OMAP4430_PRM_OCP_SOCKET_INST, vp->irqstatus_mpu); return irqstatus & vp->tranxdone_status; } static void omap4_prm_vp_clear_txdone(u8 vp_id) { struct omap4_vp *vp = &omap4_vp[vp_id]; omap4_prminst_write_inst_reg(vp->tranxdone_status, OMAP4430_PRM_PARTITION, OMAP4430_PRM_OCP_SOCKET_INST, vp->irqstatus_mpu); }; u32 omap4_prm_vcvp_read(u8 offset) { s32 inst = omap4_prmst_get_prm_dev_inst(); if (inst == PRM_INSTANCE_UNKNOWN) return 0; return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, inst, offset); } void omap4_prm_vcvp_write(u32 val, u8 offset) { s32 inst = omap4_prmst_get_prm_dev_inst(); if (inst == PRM_INSTANCE_UNKNOWN) return; omap4_prminst_write_inst_reg(val, OMAP4430_PRM_PARTITION, inst, offset); } u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset) { s32 inst = omap4_prmst_get_prm_dev_inst(); if (inst == PRM_INSTANCE_UNKNOWN) return 0; return omap4_prminst_rmw_inst_reg_bits(mask, bits, OMAP4430_PRM_PARTITION, inst, offset); } static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs) { u32 mask, st; /* XXX read mask from RAM? 
*/ mask = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqen_offs); st = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqst_offs); return mask & st; } /** * omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events * @events: ptr to two consecutive u32s, preallocated by caller * * Read PRM_IRQSTATUS_MPU* bits, AND'ed with the currently-enabled PRM * MPU IRQs, and store the result into the two u32s pointed to by @events. * No return value. */ static void omap44xx_prm_read_pending_irqs(unsigned long *events) { int i; for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++) events[i] = _read_pending_irq_reg(omap4_prcm_irq_setup.mask + i * 4, omap4_prcm_irq_setup.ack + i * 4); } /** * omap44xx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete * * Force any buffered writes to the PRM IP block to complete. Needed * by the PRM IRQ handler, which reads and writes directly to the IP * block, to avoid race conditions after acknowledging or clearing IRQ * bits. No return value. */ static void omap44xx_prm_ocp_barrier(void) { omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } /** * omap44xx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU* regs * @saved_mask: ptr to a u32 array to save IRQENABLE bits * * Save the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers to * @saved_mask. @saved_mask must be allocated by the caller. * Intended to be used in the PRM interrupt handler suspend callback. * The OCP barrier is needed to ensure the write to disable PRM * interrupts reaches the PRM before returning; otherwise, spurious * interrupts might occur. No return value. 
*/ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) { int i; u16 reg; for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++) { reg = omap4_prcm_irq_setup.mask + i * 4; saved_mask[i] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, reg); omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, reg); } /* OCP barrier */ omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } /** * omap44xx_prm_restore_irqen - set PRM_IRQENABLE_MPU* registers from args * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously * * Restore the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers from * @saved_mask. Intended to be used in the PRM interrupt handler resume * callback to restore values saved by omap44xx_prm_save_and_clear_irqen(). * No OCP barrier should be needed here; any pending PRM interrupts will fire * once the writes reach the PRM. No return value. */ static void omap44xx_prm_restore_irqen(u32 *saved_mask) { int i; for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++) omap4_prm_write_inst_reg(saved_mask[i], OMAP4430_PRM_OCP_SOCKET_INST, omap4_prcm_irq_setup.mask + i * 4); } /** * omap44xx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain * * Clear any previously-latched I/O wakeup events and ensure that the * I/O wakeup gates are aligned with the current mux settings. Works * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then * deasserting WUCLKIN and waiting for WUCLKOUT to be deasserted. * No return value. XXX Are the final two steps necessary? 
*/ static void omap44xx_prm_reconfigure_io_chain(void) { int i = 0; s32 inst = omap4_prmst_get_prm_dev_inst(); if (inst == PRM_INSTANCE_UNKNOWN) return; /* Trigger WUCLKIN enable */ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, OMAP4430_WUCLK_CTRL_MASK, inst, omap4_prcm_irq_setup.pm_ctrl); omap_test_timeout( (((omap4_prm_read_inst_reg(inst, omap4_prcm_irq_setup.pm_ctrl) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 1), MAX_IOPAD_LATCH_TIME, i); if (i == MAX_IOPAD_LATCH_TIME) pr_warn("PRM: I/O chain clock line assertion timed out\n"); /* Trigger WUCLKIN disable */ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, 0x0, inst, omap4_prcm_irq_setup.pm_ctrl); omap_test_timeout( (((omap4_prm_read_inst_reg(inst, omap4_prcm_irq_setup.pm_ctrl) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 0), MAX_IOPAD_LATCH_TIME, i); if (i == MAX_IOPAD_LATCH_TIME) pr_warn("PRM: I/O chain clock line deassertion timed out\n"); return; } /** * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches * * Activates the I/O wakeup event latches and allows events logged by * those latches to signal a wakeup event to the PRCM. For I/O wakeups * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and * omap44xx_prm_reconfigure_io_chain() must be called. No return value. */ static void omap44xx_prm_enable_io_wakeup(void) { s32 inst = omap4_prmst_get_prm_dev_inst(); if (inst == PRM_INSTANCE_UNKNOWN) return; omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK, OMAP4430_GLOBAL_WUEN_MASK, inst, omap4_prcm_irq_setup.pm_ctrl); } /** * omap44xx_prm_read_reset_sources - return the last SoC reset source * * Return a u32 representing the last reset sources of the SoC. The * returned reset source bits are standardized across OMAP SoCs. 
*/ static u32 omap44xx_prm_read_reset_sources(void) { struct prm_reset_src_map *p; u32 r = 0; u32 v; s32 inst = omap4_prmst_get_prm_dev_inst(); if (inst == PRM_INSTANCE_UNKNOWN) return 0; v = omap4_prm_read_inst_reg(inst, OMAP4_RM_RSTST); p = omap44xx_prm_reset_src_map; while (p->reg_shift >= 0 && p->std_shift >= 0) { if (v & (1 << p->reg_shift)) r |= 1 << p->std_shift; p++; } return r; } /** * omap44xx_prm_was_any_context_lost_old - was module hardware context lost? * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Return 1 if any bits were set in the *_CONTEXT_* register * identified by (@part, @inst, @idx), which means that some context * was lost for that module; otherwise, return 0. */ static bool omap44xx_prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx) { return (omap4_prminst_read_inst_reg(part, inst, idx)) ? 1 : 0; } /** * omap44xx_prm_clear_context_lost_flags_old - clear context loss flags * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Clear hardware context loss bits for the module identified by * (@part, @inst, @idx). No return value. XXX Writes to reserved bits; * is there a way to avoid this? 
*/ static void omap44xx_prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx) { omap4_prminst_write_inst_reg(0xffffffff, part, inst, idx); } /* Powerdomain low-level functions */ static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) { omap4_prminst_rmw_inst_reg_bits(OMAP_POWERSTATE_MASK, (pwrst << OMAP_POWERSTATE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_next_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP_POWERSTATE_MASK; v >>= OMAP_POWERSTATE_SHIFT; return v; } static int omap4_pwrdm_read_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP_POWERSTATEST_MASK; v >>= OMAP_POWERSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LASTPOWERSTATEENTERED_MASK; v >>= OMAP4430_LASTPOWERSTATEENTERED_SHIFT; return v; } static int omap4_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) { omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK, (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm) { omap4_prminst_rmw_inst_reg_bits(OMAP4430_LASTPOWERSTATEENTERED_MASK, OMAP4430_LASTPOWERSTATEENTERED_MASK, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); return 0; } static int omap4_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst) { u32 v; v = pwrst << __ffs(OMAP4430_LOGICRETSTATE_MASK); omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOGICRETSTATE_MASK, v, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 
bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_onstate_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LOGICSTATEST_MASK; v >>= OMAP4430_LOGICSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_logic_retst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP4430_LOGICRETSTATE_MASK; v >>= OMAP4430_LOGICRETSTATE_SHIFT; return v; } /** * omap4_pwrdm_read_prev_logic_pwrst - read the previous logic powerstate * @pwrdm: struct powerdomain * to read the state for * * Reads the previous logic powerstate for a powerdomain. This * function must determine the previous logic powerstate by first * checking the previous powerstate for the domain. If that was OFF, * then logic has been lost. If previous state was RETENTION, the * function reads the setting for the next retention logic state to * see the actual value. In every other case, the logic is * retained. Returns either PWRDM_POWER_OFF or PWRDM_POWER_RET * depending whether the logic was retained or not. 
*/ static int omap4_pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm) { int state; state = omap4_pwrdm_read_prev_pwrst(pwrdm); if (state == PWRDM_POWER_OFF) return PWRDM_POWER_OFF; if (state != PWRDM_POWER_RET) return PWRDM_POWER_RET; return omap4_pwrdm_read_logic_retst(pwrdm); } static int omap4_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_stst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= m; v >>= __ffs(m); return v; } static int omap4_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= m; v >>= __ffs(m); return v; } /** * omap4_pwrdm_read_prev_mem_pwrst - reads the previous memory powerstate * @pwrdm: struct powerdomain * to read mem powerstate for * @bank: memory bank index * * Reads the previous memory powerstate for a powerdomain. This * function must determine the previous memory powerstate by first * checking the previous powerstate for the domain. If that was OFF, * then logic has been lost. If previous state was RETENTION, the * function reads the setting for the next memory retention state to * see the actual value. In every other case, the logic is * retained. Returns either PWRDM_POWER_OFF or PWRDM_POWER_RET * depending whether logic was retained or not. 
*/ static int omap4_pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { int state; state = omap4_pwrdm_read_prev_pwrst(pwrdm); if (state == PWRDM_POWER_OFF) return PWRDM_POWER_OFF; if (state != PWRDM_POWER_RET) return PWRDM_POWER_RET; return omap4_pwrdm_read_mem_retst(pwrdm, bank); } static int omap4_pwrdm_wait_transition(struct powerdomain *pwrdm) { u32 c = 0; /* * REVISIT: pwrdm_wait_transition() may be better implemented * via a callback and a periodic timer check -- how long do we expect * powerdomain transitions to take? */ /* XXX Is this udelay() value meaningful? */ while ((omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST) & OMAP_INTRANSITION_MASK) && (c++ < PWRDM_TRANSITION_BAILOUT)) udelay(1); if (c > PWRDM_TRANSITION_BAILOUT) { pr_err("powerdomain: %s: waited too long to complete transition\n", pwrdm->name); return -EAGAIN; } pr_debug("powerdomain: completed transition in %d loops\n", c); return 0; } static int omap4_check_vcvp(void) { if (prm_features & PRM_HAS_VOLTAGE) return 1; return 0; } /** * omap4_pwrdm_save_context - Saves the powerdomain state * @pwrdm: pointer to individual powerdomain * * The function saves the powerdomain state control information. * This is needed in rtc+ddr modes where we lose powerdomain context. */ static void omap4_pwrdm_save_context(struct powerdomain *pwrdm) { pwrdm->context = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, pwrdm->pwrstctrl_offs); /* * Do not save LOWPOWERSTATECHANGE, writing a 1 indicates a request, * reading back a 1 indicates a request in progress. */ pwrdm->context &= ~OMAP4430_LOWPOWERSTATECHANGE_MASK; } /** * omap4_pwrdm_restore_context - Restores the powerdomain state * @pwrdm: pointer to individual powerdomain * * The function restores the powerdomain state control information. * This is needed in rtc+ddr modes where we lose powerdomain context. 
*/ static void omap4_pwrdm_restore_context(struct powerdomain *pwrdm) { int st, ctrl; st = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, pwrdm->pwrstctrl_offs); omap4_prminst_write_inst_reg(pwrdm->context, pwrdm->prcm_partition, pwrdm->prcm_offs, pwrdm->pwrstctrl_offs); /* Make sure we only wait for a transition if there is one */ st &= OMAP_POWERSTATEST_MASK; ctrl = OMAP_POWERSTATEST_MASK & pwrdm->context; if (st != ctrl) omap4_pwrdm_wait_transition(pwrdm); } struct pwrdm_ops omap4_pwrdm_operations = { .pwrdm_set_next_pwrst = omap4_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = omap4_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = omap4_pwrdm_read_pwrst, .pwrdm_read_prev_pwrst = omap4_pwrdm_read_prev_pwrst, .pwrdm_set_lowpwrstchange = omap4_pwrdm_set_lowpwrstchange, .pwrdm_clear_all_prev_pwrst = omap4_pwrdm_clear_all_prev_pwrst, .pwrdm_set_logic_retst = omap4_pwrdm_set_logic_retst, .pwrdm_read_logic_pwrst = omap4_pwrdm_read_logic_pwrst, .pwrdm_read_prev_logic_pwrst = omap4_pwrdm_read_prev_logic_pwrst, .pwrdm_read_logic_retst = omap4_pwrdm_read_logic_retst, .pwrdm_read_mem_pwrst = omap4_pwrdm_read_mem_pwrst, .pwrdm_read_mem_retst = omap4_pwrdm_read_mem_retst, .pwrdm_read_prev_mem_pwrst = omap4_pwrdm_read_prev_mem_pwrst, .pwrdm_set_mem_onst = omap4_pwrdm_set_mem_onst, .pwrdm_set_mem_retst = omap4_pwrdm_set_mem_retst, .pwrdm_wait_transition = omap4_pwrdm_wait_transition, .pwrdm_has_voltdm = omap4_check_vcvp, .pwrdm_save_context = omap4_pwrdm_save_context, .pwrdm_restore_context = omap4_pwrdm_restore_context, }; static int omap44xx_prm_late_init(void); static void prm_save_context(void) { omap_prm_context.irq_enable = omap4_prm_read_inst_reg(AM43XX_PRM_OCP_SOCKET_INST, omap4_prcm_irq_setup.mask); omap_prm_context.pm_ctrl = omap4_prm_read_inst_reg(AM43XX_PRM_DEVICE_INST, omap4_prcm_irq_setup.pm_ctrl); } static void prm_restore_context(void) { omap4_prm_write_inst_reg(omap_prm_context.irq_enable, OMAP4430_PRM_OCP_SOCKET_INST, 
omap4_prcm_irq_setup.mask); omap4_prm_write_inst_reg(omap_prm_context.pm_ctrl, AM43XX_PRM_DEVICE_INST, omap4_prcm_irq_setup.pm_ctrl); } static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v) { switch (cmd) { case CPU_CLUSTER_PM_ENTER: if (enable_off_mode) prm_save_context(); break; case CPU_CLUSTER_PM_EXIT: if (enable_off_mode) prm_restore_context(); break; } return NOTIFY_OK; } /* * XXX document */ static struct prm_ll_data omap44xx_prm_ll_data = { .read_reset_sources = &omap44xx_prm_read_reset_sources, .was_any_context_lost_old = &omap44xx_prm_was_any_context_lost_old, .clear_context_loss_flags_old = &omap44xx_prm_clear_context_loss_flags_old, .late_init = &omap44xx_prm_late_init, .assert_hardreset = omap4_prminst_assert_hardreset, .deassert_hardreset = omap4_prminst_deassert_hardreset, .is_hardreset_asserted = omap4_prminst_is_hardreset_asserted, .reset_system = omap4_prminst_global_warm_sw_reset, .vp_check_txdone = omap4_prm_vp_check_txdone, .vp_clear_txdone = omap4_prm_vp_clear_txdone, }; static const struct omap_prcm_init_data *prm_init_data; int __init omap44xx_prm_init(const struct omap_prcm_init_data *data) { static struct notifier_block nb; omap_prm_base_init(); prm_init_data = data; if (data->flags & PRM_HAS_IO_WAKEUP) prm_features |= PRM_HAS_IO_WAKEUP; if (data->flags & PRM_HAS_VOLTAGE) prm_features |= PRM_HAS_VOLTAGE; omap4_prminst_set_prm_dev_inst(data->device_inst_offset); /* Add AM437X specific differences */ if (of_device_is_compatible(data->np, "ti,am4-prcm")) { omap4_prcm_irq_setup.nr_irqs = 1; omap4_prcm_irq_setup.nr_regs = 1; omap4_prcm_irq_setup.pm_ctrl = AM43XX_PRM_IO_PMCTRL_OFFSET; omap4_prcm_irq_setup.ack = AM43XX_PRM_IRQSTATUS_MPU_OFFSET; omap4_prcm_irq_setup.mask = AM43XX_PRM_IRQENABLE_MPU_OFFSET; } /* Only AM43XX can lose prm context during rtc-ddr suspend */ if (soc_is_am43xx()) { nb.notifier_call = cpu_notifier; cpu_pm_register_notifier(&nb); } return prm_register(&omap44xx_prm_ll_data); } static int 
omap44xx_prm_late_init(void) { int irq_num; if (!(prm_features & PRM_HAS_IO_WAKEUP)) return 0; irq_num = of_irq_get(prm_init_data->np, 0); if (irq_num == -EPROBE_DEFER) return irq_num; omap4_prcm_irq_setup.irq = irq_num; omap44xx_prm_enable_io_wakeup(); return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup); } static void __exit omap44xx_prm_exit(void) { prm_unregister(&omap44xx_prm_ll_data); } __exitcall(omap44xx_prm_exit);
linux-master
arch/arm/mach-omap2/prm44xx.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP2/3/4 clockdomain framework functions * * Copyright (C) 2008-2011 Texas Instruments, Inc. * Copyright (C) 2008-2011 Nokia Corporation * * Written by Paul Walmsley and Jouni Högander * Added OMAP4 specific support by Abhijit Pagare <[email protected]> */ #undef DEBUG #include <linux/kernel.h> #include <linux/device.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/limits.h> #include <linux/err.h> #include <linux/clk-provider.h> #include <linux/cpu_pm.h> #include <linux/io.h> #include <linux/bitops.h> #include "soc.h" #include "clock.h" #include "clockdomain.h" #include "pm.h" /* clkdm_list contains all registered struct clockdomains */ static LIST_HEAD(clkdm_list); /* array of clockdomain deps to be added/removed when clkdm in hwsup mode */ static struct clkdm_autodep *autodeps; static struct clkdm_ops *arch_clkdm; void clkdm_save_context(void); void clkdm_restore_context(void); /* Private functions */ static struct clockdomain *_clkdm_lookup(const char *name) { struct clockdomain *clkdm, *temp_clkdm; if (!name) return NULL; clkdm = NULL; list_for_each_entry(temp_clkdm, &clkdm_list, node) { if (!strcmp(name, temp_clkdm->name)) { clkdm = temp_clkdm; break; } } return clkdm; } /** * _clkdm_register - register a clockdomain * @clkdm: struct clockdomain * to register * * Adds a clockdomain to the internal clockdomain list. * Returns -EINVAL if given a null pointer, -EEXIST if a clockdomain is * already registered by the provided name, or 0 upon success. 
*/ static int _clkdm_register(struct clockdomain *clkdm) { struct powerdomain *pwrdm; if (!clkdm || !clkdm->name) return -EINVAL; pwrdm = pwrdm_lookup(clkdm->pwrdm.name); if (!pwrdm) { pr_err("clockdomain: %s: powerdomain %s does not exist\n", clkdm->name, clkdm->pwrdm.name); return -EINVAL; } clkdm->pwrdm.ptr = pwrdm; /* Verify that the clockdomain is not already registered */ if (_clkdm_lookup(clkdm->name)) return -EEXIST; list_add(&clkdm->node, &clkdm_list); pwrdm_add_clkdm(pwrdm, clkdm); pr_debug("clockdomain: registered %s\n", clkdm->name); return 0; } /* _clkdm_deps_lookup - look up the specified clockdomain in a clkdm list */ static struct clkdm_dep *_clkdm_deps_lookup(struct clockdomain *clkdm, struct clkdm_dep *deps) { struct clkdm_dep *cd; if (!clkdm || !deps) return ERR_PTR(-EINVAL); for (cd = deps; cd->clkdm_name; cd++) { if (!cd->clkdm && cd->clkdm_name) cd->clkdm = _clkdm_lookup(cd->clkdm_name); if (cd->clkdm == clkdm) break; } if (!cd->clkdm_name) return ERR_PTR(-ENOENT); return cd; } /** * _autodep_lookup - resolve autodep clkdm names to clkdm pointers; store * @autodep: struct clkdm_autodep * to resolve * * Resolve autodep clockdomain names to clockdomain pointers via * clkdm_lookup() and store the pointers in the autodep structure. An * "autodep" is a clockdomain sleep/wakeup dependency that is * automatically added and removed whenever clocks in the associated * clockdomain are enabled or disabled (respectively) when the * clockdomain is in hardware-supervised mode. Meant to be called * once at clockdomain layer initialization, since these should remain * fixed for a particular architecture. No return value. 
* * XXX autodeps are deprecated and should be removed at the earliest * opportunity */ static void _autodep_lookup(struct clkdm_autodep *autodep) { struct clockdomain *clkdm; if (!autodep) return; clkdm = clkdm_lookup(autodep->clkdm.name); if (!clkdm) { pr_err("clockdomain: autodeps: clockdomain %s does not exist\n", autodep->clkdm.name); clkdm = ERR_PTR(-ENOENT); } autodep->clkdm.ptr = clkdm; } /** * _resolve_clkdm_deps() - resolve clkdm_names in @clkdm_deps to clkdms * @clkdm: clockdomain that we are resolving dependencies for * @clkdm_deps: ptr to array of struct clkdm_deps to resolve * * Iterates through @clkdm_deps, looking up the struct clockdomain named by * clkdm_name and storing the clockdomain pointer in the struct clkdm_dep. * No return value. */ static void _resolve_clkdm_deps(struct clockdomain *clkdm, struct clkdm_dep *clkdm_deps) { struct clkdm_dep *cd; for (cd = clkdm_deps; cd && cd->clkdm_name; cd++) { if (cd->clkdm) continue; cd->clkdm = _clkdm_lookup(cd->clkdm_name); WARN(!cd->clkdm, "clockdomain: %s: could not find clkdm %s while resolving dependencies - should never happen", clkdm->name, cd->clkdm_name); } } /** * _clkdm_add_wkdep - add a wakeup dependency from clkdm2 to clkdm1 (lockless) * @clkdm1: wake this struct clockdomain * up (dependent) * @clkdm2: when this struct clockdomain * wakes up (source) * * When the clockdomain represented by @clkdm2 wakes up, wake up * @clkdm1. Implemented in hardware on the OMAP, this feature is * designed to reduce wakeup latency of the dependent clockdomain @clkdm1. * Returns -EINVAL if presented with invalid clockdomain pointers, * -ENOENT if @clkdm2 cannot wake up clkdm1 in hardware, or 0 upon * success. 
*/ static int _clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret = 0; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) ret = PTR_ERR(cd); if (!arch_clkdm || !arch_clkdm->clkdm_add_wkdep) ret = -EINVAL; if (ret) { pr_debug("clockdomain: hardware cannot set/clear wake up of %s when %s wakes up\n", clkdm1->name, clkdm2->name); return ret; } cd->wkdep_usecount++; if (cd->wkdep_usecount == 1) { pr_debug("clockdomain: hardware will wake up %s when %s wakes up\n", clkdm1->name, clkdm2->name); ret = arch_clkdm->clkdm_add_wkdep(clkdm1, clkdm2); } return ret; } /** * _clkdm_del_wkdep - remove a wakeup dep from clkdm2 to clkdm1 (lockless) * @clkdm1: wake this struct clockdomain * up (dependent) * @clkdm2: when this struct clockdomain * wakes up (source) * * Remove a wakeup dependency causing @clkdm1 to wake up when @clkdm2 * wakes up. Returns -EINVAL if presented with invalid clockdomain * pointers, -ENOENT if @clkdm2 cannot wake up clkdm1 in hardware, or * 0 upon success. 
*/ static int _clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret = 0; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) ret = PTR_ERR(cd); if (!arch_clkdm || !arch_clkdm->clkdm_del_wkdep) ret = -EINVAL; if (ret) { pr_debug("clockdomain: hardware cannot set/clear wake up of %s when %s wakes up\n", clkdm1->name, clkdm2->name); return ret; } cd->wkdep_usecount--; if (cd->wkdep_usecount == 0) { pr_debug("clockdomain: hardware will no longer wake up %s after %s wakes up\n", clkdm1->name, clkdm2->name); ret = arch_clkdm->clkdm_del_wkdep(clkdm1, clkdm2); } return ret; } /** * _clkdm_add_sleepdep - add a sleep dependency from clkdm2 to clkdm1 (lockless) * @clkdm1: prevent this struct clockdomain * from sleeping (dependent) * @clkdm2: when this struct clockdomain * is active (source) * * Prevent @clkdm1 from automatically going inactive (and then to * retention or off) if @clkdm2 is active. Returns -EINVAL if * presented with invalid clockdomain pointers or called on a machine * that does not support software-configurable hardware sleep * dependencies, -ENOENT if the specified dependency cannot be set in * hardware, or 0 upon success. 
*/ static int _clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret = 0; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs); if (IS_ERR(cd)) ret = PTR_ERR(cd); if (!arch_clkdm || !arch_clkdm->clkdm_add_sleepdep) ret = -EINVAL; if (ret) { pr_debug("clockdomain: hardware cannot set/clear sleep dependency affecting %s from %s\n", clkdm1->name, clkdm2->name); return ret; } cd->sleepdep_usecount++; if (cd->sleepdep_usecount == 1) { pr_debug("clockdomain: will prevent %s from sleeping if %s is active\n", clkdm1->name, clkdm2->name); ret = arch_clkdm->clkdm_add_sleepdep(clkdm1, clkdm2); } return ret; } /** * _clkdm_del_sleepdep - remove a sleep dep from clkdm2 to clkdm1 (lockless) * @clkdm1: prevent this struct clockdomain * from sleeping (dependent) * @clkdm2: when this struct clockdomain * is active (source) * * Allow @clkdm1 to automatically go inactive (and then to retention or * off), independent of the activity state of @clkdm2. Returns -EINVAL * if presented with invalid clockdomain pointers or called on a machine * that does not support software-configurable hardware sleep dependencies, * -ENOENT if the specified dependency cannot be cleared in hardware, or * 0 upon success. 
*/ static int _clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret = 0; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs); if (IS_ERR(cd)) ret = PTR_ERR(cd); if (!arch_clkdm || !arch_clkdm->clkdm_del_sleepdep) ret = -EINVAL; if (ret) { pr_debug("clockdomain: hardware cannot set/clear sleep dependency affecting %s from %s\n", clkdm1->name, clkdm2->name); return ret; } cd->sleepdep_usecount--; if (cd->sleepdep_usecount == 0) { pr_debug("clockdomain: will no longer prevent %s from sleeping if %s is active\n", clkdm1->name, clkdm2->name); ret = arch_clkdm->clkdm_del_sleepdep(clkdm1, clkdm2); } return ret; } /* Public functions */ /** * clkdm_register_platform_funcs - register clockdomain implementation fns * @co: func pointers for arch specific implementations * * Register the list of function pointers used to implement the * clockdomain functions on different OMAP SoCs. Should be called * before any other clkdm_register*() function. Returns -EINVAL if * @co is null, -EEXIST if platform functions have already been * registered, or 0 upon success. */ int clkdm_register_platform_funcs(struct clkdm_ops *co) { if (!co) return -EINVAL; if (arch_clkdm) return -EEXIST; arch_clkdm = co; return 0; }; /** * clkdm_register_clkdms - register SoC clockdomains * @cs: pointer to an array of struct clockdomain to register * * Register the clockdomains available on a particular OMAP SoC. Must * be called after clkdm_register_platform_funcs(). May be called * multiple times. Returns -EACCES if called before * clkdm_register_platform_funcs(); -EINVAL if the argument @cs is * null; or 0 upon success. 
*/ int clkdm_register_clkdms(struct clockdomain **cs) { struct clockdomain **c = NULL; if (!arch_clkdm) return -EACCES; if (!cs) return -EINVAL; for (c = cs; *c; c++) _clkdm_register(*c); return 0; } /** * clkdm_register_autodeps - register autodeps (if required) * @ia: pointer to a static array of struct clkdm_autodep to register * * Register clockdomain "automatic dependencies." These are * clockdomain wakeup and sleep dependencies that are automatically * added whenever the first clock inside a clockdomain is enabled, and * removed whenever the last clock inside a clockdomain is disabled. * These are currently only used on OMAP3 devices, and are deprecated, * since they waste energy. However, until the OMAP2/3 IP block * enable/disable sequence can be converted to match the OMAP4 * sequence, they are needed. * * Must be called only after all of the SoC clockdomains are * registered, since the function will resolve autodep clockdomain * names into clockdomain pointers. * * The struct clkdm_autodep @ia array must be static, as this function * does not copy the array elements. * * Returns -EACCES if called before any clockdomains have been * registered, -EINVAL if called with a null @ia argument, -EEXIST if * autodeps have already been registered, or 0 upon success. 
*/ int clkdm_register_autodeps(struct clkdm_autodep *ia) { struct clkdm_autodep *a = NULL; if (list_empty(&clkdm_list)) return -EACCES; if (!ia) return -EINVAL; if (autodeps) return -EEXIST; autodeps = ia; for (a = autodeps; a->clkdm.ptr; a++) _autodep_lookup(a); return 0; } static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v) { switch (cmd) { case CPU_CLUSTER_PM_ENTER: if (enable_off_mode) clkdm_save_context(); break; case CPU_CLUSTER_PM_EXIT: if (enable_off_mode) clkdm_restore_context(); break; } return NOTIFY_OK; } /** * clkdm_complete_init - set up the clockdomain layer * * Put all clockdomains into software-supervised mode; PM code should * later enable hardware-supervised mode as appropriate. Must be * called after clkdm_register_clkdms(). Returns -EACCES if called * before clkdm_register_clkdms(), or 0 upon success. */ int clkdm_complete_init(void) { struct clockdomain *clkdm; static struct notifier_block nb; if (list_empty(&clkdm_list)) return -EACCES; list_for_each_entry(clkdm, &clkdm_list, node) { clkdm_deny_idle(clkdm); _resolve_clkdm_deps(clkdm, clkdm->wkdep_srcs); clkdm_clear_all_wkdeps(clkdm); _resolve_clkdm_deps(clkdm, clkdm->sleepdep_srcs); clkdm_clear_all_sleepdeps(clkdm); } /* Only AM43XX can lose clkdm context during rtc-ddr suspend */ if (soc_is_am43xx()) { nb.notifier_call = cpu_notifier; cpu_pm_register_notifier(&nb); } return 0; } /** * clkdm_lookup - look up a clockdomain by name, return a pointer * @name: name of clockdomain * * Find a registered clockdomain by its name @name. Returns a pointer * to the struct clockdomain if found, or NULL otherwise. 
*/ struct clockdomain *clkdm_lookup(const char *name) { struct clockdomain *clkdm, *temp_clkdm; if (!name) return NULL; clkdm = NULL; list_for_each_entry(temp_clkdm, &clkdm_list, node) { if (!strcmp(name, temp_clkdm->name)) { clkdm = temp_clkdm; break; } } return clkdm; } /** * clkdm_for_each - call function on each registered clockdomain * @fn: callback function * * * Call the supplied function @fn for each registered clockdomain. * The callback function @fn can return anything but 0 to bail * out early from the iterator. The callback function is called with * the clkdm_mutex held, so no clockdomain structure manipulation * functions should be called from the callback, although hardware * clockdomain control functions are fine. Returns the last return * value of the callback function, which should be 0 for success or * anything else to indicate failure; or -EINVAL if the function pointer * is null. */ int clkdm_for_each(int (*fn)(struct clockdomain *clkdm, void *user), void *user) { struct clockdomain *clkdm; int ret = 0; if (!fn) return -EINVAL; list_for_each_entry(clkdm, &clkdm_list, node) { ret = (*fn)(clkdm, user); if (ret) break; } return ret; } /** * clkdm_get_pwrdm - return a ptr to the pwrdm that this clkdm resides in * @clkdm: struct clockdomain * * * Return a pointer to the struct powerdomain that the specified clockdomain * @clkdm exists in, or returns NULL if @clkdm is NULL. */ struct powerdomain *clkdm_get_pwrdm(struct clockdomain *clkdm) { if (!clkdm) return NULL; return clkdm->pwrdm.ptr; } /* Hardware clockdomain control */ /** * clkdm_add_wkdep - add a wakeup dependency from clkdm2 to clkdm1 * @clkdm1: wake this struct clockdomain * up (dependent) * @clkdm2: when this struct clockdomain * wakes up (source) * * When the clockdomain represented by @clkdm2 wakes up, wake up * @clkdm1. Implemented in hardware on the OMAP, this feature is * designed to reduce wakeup latency of the dependent clockdomain @clkdm1. 
* Returns -EINVAL if presented with invalid clockdomain pointers, * -ENOENT if @clkdm2 cannot wake up clkdm1 in hardware, or 0 upon * success. */ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) return PTR_ERR(cd); pwrdm_lock(cd->clkdm->pwrdm.ptr); ret = _clkdm_add_wkdep(clkdm1, clkdm2); pwrdm_unlock(cd->clkdm->pwrdm.ptr); return ret; } /** * clkdm_del_wkdep - remove a wakeup dependency from clkdm2 to clkdm1 * @clkdm1: wake this struct clockdomain * up (dependent) * @clkdm2: when this struct clockdomain * wakes up (source) * * Remove a wakeup dependency causing @clkdm1 to wake up when @clkdm2 * wakes up. Returns -EINVAL if presented with invalid clockdomain * pointers, -ENOENT if @clkdm2 cannot wake up clkdm1 in hardware, or * 0 upon success. */ int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) return PTR_ERR(cd); pwrdm_lock(cd->clkdm->pwrdm.ptr); ret = _clkdm_del_wkdep(clkdm1, clkdm2); pwrdm_unlock(cd->clkdm->pwrdm.ptr); return ret; } /** * clkdm_read_wkdep - read wakeup dependency state from clkdm2 to clkdm1 * @clkdm1: wake this struct clockdomain * up (dependent) * @clkdm2: when this struct clockdomain * wakes up (source) * * Return 1 if a hardware wakeup dependency exists wherein @clkdm1 will be * awoken when @clkdm2 wakes up; 0 if dependency is not set; -EINVAL * if either clockdomain pointer is invalid; or -ENOENT if the hardware * is incapable. * * REVISIT: Currently this function only represents software-controllable * wakeup dependencies. Wakeup dependencies fixed in hardware are not * yet handled here. 
*/ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret = 0; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) ret = PTR_ERR(cd); if (!arch_clkdm || !arch_clkdm->clkdm_read_wkdep) ret = -EINVAL; if (ret) { pr_debug("clockdomain: hardware cannot set/clear wake up of %s when %s wakes up\n", clkdm1->name, clkdm2->name); return ret; } /* XXX It's faster to return the wkdep_usecount */ return arch_clkdm->clkdm_read_wkdep(clkdm1, clkdm2); } /** * clkdm_clear_all_wkdeps - remove all wakeup dependencies from target clkdm * @clkdm: struct clockdomain * to remove all wakeup dependencies from * * Remove all inter-clockdomain wakeup dependencies that could cause * @clkdm to wake. Intended to be used during boot to initialize the * PRCM to a known state, after all clockdomains are put into swsup idle * and woken up. Returns -EINVAL if @clkdm pointer is invalid, or * 0 upon success. */ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm) { if (!clkdm) return -EINVAL; if (!arch_clkdm || !arch_clkdm->clkdm_clear_all_wkdeps) return -EINVAL; return arch_clkdm->clkdm_clear_all_wkdeps(clkdm); } /** * clkdm_add_sleepdep - add a sleep dependency from clkdm2 to clkdm1 * @clkdm1: prevent this struct clockdomain * from sleeping (dependent) * @clkdm2: when this struct clockdomain * is active (source) * * Prevent @clkdm1 from automatically going inactive (and then to * retention or off) if @clkdm2 is active. Returns -EINVAL if * presented with invalid clockdomain pointers or called on a machine * that does not support software-configurable hardware sleep * dependencies, -ENOENT if the specified dependency cannot be set in * hardware, or 0 upon success. 
*/ int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) return PTR_ERR(cd); pwrdm_lock(cd->clkdm->pwrdm.ptr); ret = _clkdm_add_sleepdep(clkdm1, clkdm2); pwrdm_unlock(cd->clkdm->pwrdm.ptr); return ret; } /** * clkdm_del_sleepdep - remove a sleep dependency from clkdm2 to clkdm1 * @clkdm1: prevent this struct clockdomain * from sleeping (dependent) * @clkdm2: when this struct clockdomain * is active (source) * * Allow @clkdm1 to automatically go inactive (and then to retention or * off), independent of the activity state of @clkdm2. Returns -EINVAL * if presented with invalid clockdomain pointers or called on a machine * that does not support software-configurable hardware sleep dependencies, * -ENOENT if the specified dependency cannot be cleared in hardware, or * 0 upon success. */ int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs); if (IS_ERR(cd)) return PTR_ERR(cd); pwrdm_lock(cd->clkdm->pwrdm.ptr); ret = _clkdm_del_sleepdep(clkdm1, clkdm2); pwrdm_unlock(cd->clkdm->pwrdm.ptr); return ret; } /** * clkdm_read_sleepdep - read sleep dependency state from clkdm2 to clkdm1 * @clkdm1: prevent this struct clockdomain * from sleeping (dependent) * @clkdm2: when this struct clockdomain * is active (source) * * Return 1 if a hardware sleep dependency exists wherein @clkdm1 will * not be allowed to automatically go inactive if @clkdm2 is active; * 0 if @clkdm1's automatic power state inactivity transition is independent * of @clkdm2's; -EINVAL if either clockdomain pointer is invalid or called * on a machine that does not support software-configurable hardware sleep * dependencies; or -ENOENT if the hardware is incapable. 
* * REVISIT: Currently this function only represents software-controllable * sleep dependencies. Sleep dependencies fixed in hardware are not * yet handled here. */ int clkdm_read_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { struct clkdm_dep *cd; int ret = 0; if (!clkdm1 || !clkdm2) return -EINVAL; cd = _clkdm_deps_lookup(clkdm2, clkdm1->sleepdep_srcs); if (IS_ERR(cd)) ret = PTR_ERR(cd); if (!arch_clkdm || !arch_clkdm->clkdm_read_sleepdep) ret = -EINVAL; if (ret) { pr_debug("clockdomain: hardware cannot set/clear sleep dependency affecting %s from %s\n", clkdm1->name, clkdm2->name); return ret; } /* XXX It's faster to return the sleepdep_usecount */ return arch_clkdm->clkdm_read_sleepdep(clkdm1, clkdm2); } /** * clkdm_clear_all_sleepdeps - remove all sleep dependencies from target clkdm * @clkdm: struct clockdomain * to remove all sleep dependencies from * * Remove all inter-clockdomain sleep dependencies that could prevent * @clkdm from idling. Intended to be used during boot to initialize the * PRCM to a known state, after all clockdomains are put into swsup idle * and woken up. Returns -EINVAL if @clkdm pointer is invalid, or * 0 upon success. */ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm) { if (!clkdm) return -EINVAL; if (!arch_clkdm || !arch_clkdm->clkdm_clear_all_sleepdeps) return -EINVAL; return arch_clkdm->clkdm_clear_all_sleepdeps(clkdm); } /** * clkdm_sleep_nolock - force clockdomain sleep transition (lockless) * @clkdm: struct clockdomain * * * Instruct the CM to force a sleep transition on the specified * clockdomain @clkdm. Only for use by the powerdomain code. Returns * -EINVAL if @clkdm is NULL or if clockdomain does not support * software-initiated sleep; 0 upon success. 
 */
static int clkdm_sleep_nolock(struct clockdomain *clkdm)
{
	int ret;

	if (!clkdm)
		return -EINVAL;

	/* Only domains flagged as sw-force-sleep capable may be forced down */
	if (!(clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) {
		pr_debug("clockdomain: %s does not support forcing sleep via software\n",
			 clkdm->name);
		return -EINVAL;
	}

	if (!arch_clkdm || !arch_clkdm->clkdm_sleep)
		return -EINVAL;

	pr_debug("clockdomain: forcing sleep on %s\n", clkdm->name);

	/* Forcing sleep takes the domain out of hardware-supervised mode */
	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;

	ret = arch_clkdm->clkdm_sleep(clkdm);
	/* OR the results together so a failure in either step is reported */
	ret |= pwrdm_state_switch_nolock(clkdm->pwrdm.ptr);

	return ret;
}

/**
 * clkdm_sleep - force clockdomain sleep transition
 * @clkdm: struct clockdomain *
 *
 * Instruct the CM to force a sleep transition on the specified
 * clockdomain @clkdm.  Takes the owning powerdomain's lock around the
 * lockless helper above.  Returns -EINVAL if @clkdm is NULL or if
 * clockdomain does not support software-initiated sleep; 0 upon
 * success.
 */
int clkdm_sleep(struct clockdomain *clkdm)
{
	int ret;

	pwrdm_lock(clkdm->pwrdm.ptr);
	ret = clkdm_sleep_nolock(clkdm);
	pwrdm_unlock(clkdm->pwrdm.ptr);

	return ret;
}

/**
 * clkdm_wakeup_nolock - force clockdomain wakeup transition (lockless)
 * @clkdm: struct clockdomain *
 *
 * Instruct the CM to force a wakeup transition on the specified
 * clockdomain @clkdm.  Only for use by the powerdomain code.  Returns
 * -EINVAL if @clkdm is NULL or if the clockdomain does not support
 * software-controlled wakeup; 0 upon success.
*/ static int clkdm_wakeup_nolock(struct clockdomain *clkdm) { int ret; if (!clkdm) return -EINVAL; if (!(clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)) { pr_debug("clockdomain: %s does not support forcing wakeup via software\n", clkdm->name); return -EINVAL; } if (!arch_clkdm || !arch_clkdm->clkdm_wakeup) return -EINVAL; pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name); clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; ret = arch_clkdm->clkdm_wakeup(clkdm); ret |= pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); return ret; } /** * clkdm_wakeup - force clockdomain wakeup transition * @clkdm: struct clockdomain * * * Instruct the CM to force a wakeup transition on the specified * clockdomain @clkdm. Returns -EINVAL if @clkdm is NULL or if the * clockdomain does not support software-controlled wakeup; 0 upon * success. */ int clkdm_wakeup(struct clockdomain *clkdm) { int ret; pwrdm_lock(clkdm->pwrdm.ptr); ret = clkdm_wakeup_nolock(clkdm); pwrdm_unlock(clkdm->pwrdm.ptr); return ret; } /** * clkdm_allow_idle_nolock - enable hwsup idle transitions for clkdm * @clkdm: struct clockdomain * * * Allow the hardware to automatically switch the clockdomain @clkdm * into active or idle states, as needed by downstream clocks. If the * clockdomain has any downstream clocks enabled in the clock * framework, wkdep/sleepdep autodependencies are added; this is so * device drivers can read and write to the device. Only for use by * the powerdomain code. No return value. 
*/ void clkdm_allow_idle_nolock(struct clockdomain *clkdm) { if (!clkdm) return; if (!WARN_ON(!clkdm->forcewake_count)) clkdm->forcewake_count--; if (clkdm->forcewake_count) return; if (!clkdm->usecount && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) clkdm_sleep_nolock(clkdm); if (!(clkdm->flags & CLKDM_CAN_ENABLE_AUTO)) return; if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING) return; if (!arch_clkdm || !arch_clkdm->clkdm_allow_idle) return; pr_debug("clockdomain: enabling automatic idle transitions for %s\n", clkdm->name); clkdm->_flags |= _CLKDM_FLAG_HWSUP_ENABLED; arch_clkdm->clkdm_allow_idle(clkdm); pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); } /** * clkdm_allow_idle - enable hwsup idle transitions for clkdm * @clkdm: struct clockdomain * * * Allow the hardware to automatically switch the clockdomain @clkdm into * active or idle states, as needed by downstream clocks. If the * clockdomain has any downstream clocks enabled in the clock * framework, wkdep/sleepdep autodependencies are added; this is so * device drivers can read and write to the device. No return value. */ void clkdm_allow_idle(struct clockdomain *clkdm) { pwrdm_lock(clkdm->pwrdm.ptr); clkdm_allow_idle_nolock(clkdm); pwrdm_unlock(clkdm->pwrdm.ptr); } /** * clkdm_deny_idle - disable hwsup idle transitions for clkdm * @clkdm: struct clockdomain * * * Prevent the hardware from automatically switching the clockdomain * @clkdm into inactive or idle states. If the clockdomain has * downstream clocks enabled in the clock framework, wkdep/sleepdep * autodependencies are removed. Only for use by the powerdomain * code. No return value. 
*/ void clkdm_deny_idle_nolock(struct clockdomain *clkdm) { if (!clkdm) return; if (clkdm->forcewake_count++) return; if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) clkdm_wakeup_nolock(clkdm); if (!(clkdm->flags & CLKDM_CAN_DISABLE_AUTO)) return; if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING) return; if (!arch_clkdm || !arch_clkdm->clkdm_deny_idle) return; pr_debug("clockdomain: disabling automatic idle transitions for %s\n", clkdm->name); clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; arch_clkdm->clkdm_deny_idle(clkdm); pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); } /** * clkdm_deny_idle - disable hwsup idle transitions for clkdm * @clkdm: struct clockdomain * * * Prevent the hardware from automatically switching the clockdomain * @clkdm into inactive or idle states. If the clockdomain has * downstream clocks enabled in the clock framework, wkdep/sleepdep * autodependencies are removed. No return value. */ void clkdm_deny_idle(struct clockdomain *clkdm) { pwrdm_lock(clkdm->pwrdm.ptr); clkdm_deny_idle_nolock(clkdm); pwrdm_unlock(clkdm->pwrdm.ptr); } /* Public autodep handling functions (deprecated) */ /** * clkdm_add_autodeps - add auto sleepdeps/wkdeps to clkdm upon clock enable * @clkdm: struct clockdomain * * * Add the "autodep" sleep & wakeup dependencies to clockdomain 'clkdm' * in hardware-supervised mode. Meant to be called from clock framework * when a clock inside clockdomain 'clkdm' is enabled. No return value. 
* * XXX autodeps are deprecated and should be removed at the earliest * opportunity */ void clkdm_add_autodeps(struct clockdomain *clkdm) { struct clkdm_autodep *autodep; if (!autodeps || clkdm->flags & CLKDM_NO_AUTODEPS) return; for (autodep = autodeps; autodep->clkdm.ptr; autodep++) { if (IS_ERR(autodep->clkdm.ptr)) continue; pr_debug("clockdomain: %s: adding %s sleepdep/wkdep\n", clkdm->name, autodep->clkdm.ptr->name); _clkdm_add_sleepdep(clkdm, autodep->clkdm.ptr); _clkdm_add_wkdep(clkdm, autodep->clkdm.ptr); } } /** * clkdm_del_autodeps - remove auto sleepdeps/wkdeps from clkdm * @clkdm: struct clockdomain * * * Remove the "autodep" sleep & wakeup dependencies from clockdomain 'clkdm' * in hardware-supervised mode. Meant to be called from clock framework * when a clock inside clockdomain 'clkdm' is disabled. No return value. * * XXX autodeps are deprecated and should be removed at the earliest * opportunity */ void clkdm_del_autodeps(struct clockdomain *clkdm) { struct clkdm_autodep *autodep; if (!autodeps || clkdm->flags & CLKDM_NO_AUTODEPS) return; for (autodep = autodeps; autodep->clkdm.ptr; autodep++) { if (IS_ERR(autodep->clkdm.ptr)) continue; pr_debug("clockdomain: %s: removing %s sleepdep/wkdep\n", clkdm->name, autodep->clkdm.ptr->name); _clkdm_del_sleepdep(clkdm, autodep->clkdm.ptr); _clkdm_del_wkdep(clkdm, autodep->clkdm.ptr); } } /* Clockdomain-to-clock/hwmod framework interface code */ /** * clkdm_clk_enable - add an enabled downstream clock to this clkdm * @clkdm: struct clockdomain * * @clk: struct clk * of the enabled downstream clock * * Increment the usecount of the clockdomain @clkdm and ensure that it * is awake before @clk is enabled. Intended to be called by * clk_enable() code. If the clockdomain is in software-supervised * idle mode, force the clockdomain to wake. 
If the clockdomain is in
 * hardware-supervised idle mode, add clkdm-pwrdm autodependencies, to
 * ensure that devices in the clockdomain can be read from/written to
 * by on-chip processors.  Returns -EINVAL if passed null pointers;
 * returns 0 upon success or if the clockdomain is in hwsup idle mode.
 */
int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *unused)
{
	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
		return -EINVAL;

	/* The powerdomain lock serializes usecount updates */
	pwrdm_lock(clkdm->pwrdm.ptr);

	/*
	 * For arch's with no autodeps, clkdm_clk_enable
	 * should be called for every clock instance or hwmod that is
	 * enabled, so the clkdm can be force woken up.
	 */
	clkdm->usecount++;
	/* With autodeps, only the first enable needs to touch the hardware */
	if (clkdm->usecount > 1 && autodeps) {
		pwrdm_unlock(clkdm->pwrdm.ptr);
		return 0;
	}

	arch_clkdm->clkdm_clk_enable(clkdm);
	pwrdm_state_switch_nolock(clkdm->pwrdm.ptr);
	pwrdm_unlock(clkdm->pwrdm.ptr);

	pr_debug("clockdomain: %s: enabled\n", clkdm->name);

	return 0;
}

/**
 * clkdm_clk_disable - remove an enabled downstream clock from this clkdm
 * @clkdm: struct clockdomain *
 * @clk: struct clk * of the disabled downstream clock
 *
 * Decrement the usecount of this clockdomain @clkdm when @clk is
 * disabled.  Intended to be called by clk_disable() code.  If the
 * clockdomain usecount goes to 0, put the clockdomain to sleep
 * (software-supervised mode) or remove the clkdm autodependencies
 * (hardware-supervised mode).  Returns -EINVAL if passed null
 * pointers; -ERANGE if the @clkdm usecount underflows; or returns 0
 * upon success or if the clockdomain is in hwsup idle mode.
*/ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk) { if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable) return -EINVAL; pwrdm_lock(clkdm->pwrdm.ptr); /* corner case: disabling unused clocks */ if (clk && (__clk_get_enable_count(clk) == 0) && clkdm->usecount == 0) goto ccd_exit; if (clkdm->usecount == 0) { pwrdm_unlock(clkdm->pwrdm.ptr); WARN_ON(1); /* underflow */ return -ERANGE; } clkdm->usecount--; if (clkdm->usecount > 0) { pwrdm_unlock(clkdm->pwrdm.ptr); return 0; } arch_clkdm->clkdm_clk_disable(clkdm); pwrdm_state_switch_nolock(clkdm->pwrdm.ptr); pr_debug("clockdomain: %s: disabled\n", clkdm->name); ccd_exit: pwrdm_unlock(clkdm->pwrdm.ptr); return 0; } /** * clkdm_hwmod_enable - add an enabled downstream hwmod to this clkdm * @clkdm: struct clockdomain * * @oh: struct omap_hwmod * of the enabled downstream hwmod * * Increment the usecount of the clockdomain @clkdm and ensure that it * is awake before @oh is enabled. Intended to be called by * module_enable() code. * If the clockdomain is in software-supervised idle mode, force the * clockdomain to wake. If the clockdomain is in hardware-supervised idle * mode, add clkdm-pwrdm autodependencies, to ensure that devices in the * clockdomain can be read from/written to by on-chip processors. * Returns -EINVAL if passed null pointers; * returns 0 upon success or if the clockdomain is in hwsup idle mode. */ int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh) { /* The clkdm attribute does not exist yet prior OMAP4 */ if (cpu_is_omap24xx() || cpu_is_omap34xx()) return 0; /* * XXX Rewrite this code to maintain a list of enabled * downstream hwmods for debugging purposes? 
*/ if (!oh) return -EINVAL; return clkdm_clk_enable(clkdm, NULL); } /** * clkdm_hwmod_disable - remove an enabled downstream hwmod from this clkdm * @clkdm: struct clockdomain * * @oh: struct omap_hwmod * of the disabled downstream hwmod * * Decrement the usecount of this clockdomain @clkdm when @oh is * disabled. Intended to be called by module_disable() code. * If the clockdomain usecount goes to 0, put the clockdomain to sleep * (software-supervised mode) or remove the clkdm autodependencies * (hardware-supervised mode). * Returns -EINVAL if passed null pointers; -ERANGE if the @clkdm usecount * underflows; or returns 0 upon success or if the clockdomain is in hwsup * idle mode. */ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh) { /* The clkdm attribute does not exist yet prior OMAP4 */ if (cpu_is_omap24xx() || cpu_is_omap34xx()) return 0; if (!oh) return -EINVAL; return clkdm_clk_disable(clkdm, NULL); } /** * _clkdm_save_context - save the context for the control of this clkdm * * Due to a suspend or hibernation operation, the state of the registers * controlling this clkdm will be lost, save their context. */ static int _clkdm_save_context(struct clockdomain *clkdm, void *unused) { if (!arch_clkdm || !arch_clkdm->clkdm_save_context) return -EINVAL; return arch_clkdm->clkdm_save_context(clkdm); } /** * _clkdm_restore_context - restore context for control of this clkdm * * Restore the register values for this clockdomain. */ static int _clkdm_restore_context(struct clockdomain *clkdm, void *unused) { if (!arch_clkdm || !arch_clkdm->clkdm_restore_context) return -EINVAL; return arch_clkdm->clkdm_restore_context(clkdm); } /** * clkdm_save_context - Saves the context for each registered clkdm * * Save the context for each registered clockdomain. 
*/ void clkdm_save_context(void) { clkdm_for_each(_clkdm_save_context, NULL); } /** * clkdm_restore_context - Restores the context for each registered clkdm * * Restore the context for each registered clockdomain. */ void clkdm_restore_context(void) { clkdm_for_each(_clkdm_restore_context, NULL); }
linux-master
arch/arm/mach-omap2/clockdomain.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2xxx DVFS virtual clock functions
 *
 * Copyright (C) 2005-2008, 2012 Texas Instruments, Inc.
 * Copyright (C) 2004-2010 Nokia Corporation
 *
 * Contacts:
 * Richard Woodruff <[email protected]>
 * Paul Walmsley
 *
 * Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
 * Gordon McNutt and RidgeRun, Inc.
 *
 * XXX Some of this code should be replaceable by the upcoming OPP layer
 * code. However, some notion of "rate set" is probably still necessary
 * for OMAP2xxx at least. Rate sets should be generalized so they can be
 * used for any OMAP chip, not just OMAP2xxx. In particular, Richard Woodruff
 * has in the past expressed a preference to use rate sets for OPP changes,
 * rather than dynamically recalculating the clock tree, so if someone wants
 * this badly enough to write the code to handle it, we should support it
 * as an option.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>

#include "soc.h"
#include "clock.h"
#include "clock2xxx.h"
#include "opp2xxx.h"
#include "cm2xxx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "sram.h"

/* Mask of prcm_config.flags bits valid for the running chip variant */
static u16 cpu_mask;

/* Currently-active rate-table entry; set at init and on each rate change */
const struct prcm_config *curr_prcm_set;
/* SoC-specific table of supported PRCM configurations, NULL-rate terminated */
const struct prcm_config *rate_table;

/*
 * sys_ck_rate: the rate of the external high-frequency clock
 * oscillator on the board. Set by the SoC-specific clock init code.
 * Once set during a boot, will not change.
 */
static unsigned long sys_ck_rate;

/**
 * omap2_table_mpu_recalc - just return the MPU speed
 * @clk: virt_prcm_set struct clk
 * @parent_rate: rate of the parent clock (unused)
 *
 * Set virt_prcm_set's rate to the mpu_speed field of the current PRCM set.
 */
static unsigned long omap2_table_mpu_recalc(struct clk_hw *clk,
					    unsigned long parent_rate)
{
	return curr_prcm_set->mpu_speed;
}

/*
 * Look for a rate equal or less than the target rate given a configuration set.
 *
 * What's not entirely clear is "which" field represents the key field.
 * Some might argue L3-DDR, others ARM, others IVA. This code is simple and
 * just uses the ARM rates.
 */
static long omap2_round_to_table_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long *parent_rate)
{
	const struct prcm_config *ptr;
	long highest_rate;

	/* -EINVAL is returned if no entry matches this chip and sys_ck */
	highest_rate = -EINVAL;

	for (ptr = rate_table; ptr->mpu_speed; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;
		if (ptr->xtal_speed != sys_ck_rate)
			continue;

		highest_rate = ptr->mpu_speed;

		/* Can check only after xtal frequency check */
		if (ptr->mpu_speed <= rate)
			break;
	}
	return highest_rate;
}

/*
 * Sets basic clocks based on the specified rate.
 *
 * Selects the first rate-table entry at or below @rate, then reprograms
 * the CORE DPLL and SDRC. The full DPLL-relock path runs with IRQs off
 * since SDRAM is briefly unusable while its timings are switched.
 */
static int omap2_select_table_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	u32 cur_rate, done_rate, bypass = 0;
	const struct prcm_config *prcm;
	unsigned long found_speed = 0;
	unsigned long flags;

	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
		if (!(prcm->flags & cpu_mask))
			continue;

		if (prcm->xtal_speed != sys_ck_rate)
			continue;

		if (prcm->mpu_speed <= rate) {
			found_speed = prcm->mpu_speed;
			break;
		}
	}

	if (!found_speed) {
		printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
		       rate / 1000000);
		return -EINVAL;
	}

	curr_prcm_set = prcm;
	cur_rate = omap2xxx_clk_get_core_rate();

	if (prcm->dpll_speed == cur_rate / 2) {
		/* Halving: just reselect the DPLL output, no relock needed */
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
	} else if (prcm->dpll_speed == cur_rate * 2) {
		/* Doubling: switch to the x2 DPLL output */
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
	} else if (prcm->dpll_speed != cur_rate) {
		local_irq_save(flags);

		if (prcm->dpll_speed == prcm->xtal_speed)
			bypass = 1;

		if ((prcm->cm_clksel2_pll & OMAP24XX_CORE_CLK_SRC_MASK) ==
		    CORE_CLK_SRC_DPLL_X2)
			done_rate = CORE_CLK_SRC_DPLL_X2;
		else
			done_rate = CORE_CLK_SRC_DPLL;

		omap2xxx_cm_set_mod_dividers(prcm->cm_clksel_mpu,
					     prcm->cm_clksel_dsp,
					     prcm->cm_clksel_gfx,
					     prcm->cm_clksel1_core,
					     prcm->cm_clksel_mdm);

		/* x2 to enter omap2xxx_sdrc_init_params() */
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);

		omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
			       bypass);

		omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
		omap2xxx_sdrc_reprogram(done_rate, 0);

		local_irq_restore(flags);
	}

	return 0;
}

/**
 * omap2xxx_clkt_vps_check_bootloader_rates - determine which of the rate
 * table sets matches the current CORE DPLL hardware rate
 *
 * Check the MPU rate set by bootloader. Sets the 'curr_prcm_set'
 * global to point to the active rate set when found; otherwise, sets
 * it to the table's terminator entry. No return value.
 */
static void omap2xxx_clkt_vps_check_bootloader_rates(void)
{
	const struct prcm_config *prcm = NULL;
	unsigned long rate;

	rate = omap2xxx_clk_get_core_rate();
	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
		if (!(prcm->flags & cpu_mask))
			continue;
		if (prcm->xtal_speed != sys_ck_rate)
			continue;
		if (prcm->dpll_speed <= rate)
			break;
	}
	curr_prcm_set = prcm;
}

/**
 * omap2xxx_clkt_vps_late_init - store a copy of the sys_ck rate
 *
 * Store a copy of the sys_ck rate for later use by the OMAP2xxx DVFS
 * code. (The sys_ck rate does not -- or rather, must not -- change
 * during kernel runtime.) Must be called after we have a valid
 * sys_ck rate, but before the virt_prcm_set clock rate is
 * recalculated. No return value.
 */
static void omap2xxx_clkt_vps_late_init(void)
{
	struct clk *c;

	c = clk_get(NULL, "sys_ck");
	if (IS_ERR(c)) {
		WARN(1, "could not locate sys_ck\n");
	} else {
		sys_ck_rate = clk_get_rate(c);
		clk_put(c);
	}
}

#ifdef CONFIG_OF
#include <linux/clk-provider.h>
#include <linux/clkdev.h>

static const struct clk_ops virt_prcm_set_ops = {
	.recalc_rate	= &omap2_table_mpu_recalc,
	.set_rate	= &omap2_select_table_rate,
	.round_rate	= &omap2_round_to_table_rate,
};

/**
 * omap2xxx_clkt_vps_init - initialize virt_prcm_set clock
 *
 * Does a manual init for the virtual prcm DVFS clock for OMAP2. This
 * function is called only from omap2 DT clock init, as the virtual
 * node is not modelled in the DT clock data.
 */
void omap2xxx_clkt_vps_init(void)
{
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw = NULL;
	struct clk *clk;
	const char *parent_name = "mpu_ck";

	omap2xxx_clkt_vps_late_init();
	omap2xxx_clkt_vps_check_bootloader_rates();

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;
	init.name = "virt_prcm_set";
	init.ops = &virt_prcm_set_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	hw->hw.init = &init;

	clk = clk_register(NULL, &hw->hw);
	if (IS_ERR(clk)) {
		printk(KERN_ERR "Failed to register clock\n");
		kfree(hw);
		return;
	}

	clkdev_create(clk, "cpufreq_ck", NULL);
}
#endif
linux-master
arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>

#include "common.h"

#include "voltage.h"
#include "vp.h"
#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"

/*
 * Program the VP's initVDD field from @volt and pulse the initVDD copy
 * trigger so the voltage processor latches it. Returns the VPCONFIG
 * value as left in the register (initVDD/forceupdate bits cleared).
 *
 * NOTE(review): vsel is declared plain 'char' while uv_to_vsel()
 * presumably returns a u8 -- on a signed-char ABI a vsel >= 0x80 would
 * sign-extend before the shift; the subsequent mask-insert appears to
 * rely on __ffs() positioning. Confirm before changing.
 */
static u32 _vp_set_init_voltage(struct voltagedomain *voltdm, u32 volt)
{
	struct omap_vp_instance *vp = voltdm->vp;
	u32 vpconfig;
	char vsel;

	vsel = voltdm->pmic->uv_to_vsel(volt);

	vpconfig = voltdm->read(vp->vpconfig);
	vpconfig &= ~(vp->common->vpconfig_initvoltage_mask |
		      vp->common->vpconfig_forceupdate |
		      vp->common->vpconfig_initvdd);
	vpconfig |= vsel << __ffs(vp->common->vpconfig_initvoltage_mask);
	voltdm->write(vpconfig, vp->vpconfig);

	/* Trigger initVDD value copy to voltage processor */
	voltdm->write((vpconfig | vp->common->vpconfig_initvdd),
		      vp->vpconfig);

	/* Clear initVDD copy trigger bit */
	voltdm->write(vpconfig, vp->vpconfig);

	return vpconfig;
}

/* Generic voltage init functions */
void __init omap_vp_init(struct voltagedomain *voltdm)
{
	struct omap_vp_instance *vp = voltdm->vp;
	u32 val, sys_clk_rate, timeout, waittime;
	u32 vddmin, vddmax, vstepmin, vstepmax;

	if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
		pr_err("%s: No PMIC info for vdd_%s\n",
		       __func__, voltdm->name);
		return;
	}

	if (!voltdm->read || !voltdm->write) {
		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
		       __func__, voltdm->name);
		return;
	}

	vp->enabled = false;

	/* Divide to avoid overflow */
	sys_clk_rate = voltdm->sys_clk.rate / 1000;

	timeout = (sys_clk_rate * voltdm->pmic->vp_timeout_us) / 1000;
	/* Clamp the VP limits to the intersection of domain and PMIC range */
	vddmin = max(voltdm->vp_param->vddmin, voltdm->pmic->vddmin);
	vddmax = min(voltdm->vp_param->vddmax, voltdm->pmic->vddmax);
	vddmin = voltdm->pmic->uv_to_vsel(vddmin);
	vddmax = voltdm->pmic->uv_to_vsel(vddmax);

	/* Cycles the SMPS needs to settle one PMIC voltage step */
	waittime = DIV_ROUND_UP(voltdm->pmic->step_size * sys_clk_rate,
				1000 * voltdm->pmic->slew_rate);
	vstepmin = voltdm->pmic->vp_vstepmin;
	vstepmax = voltdm->pmic->vp_vstepmax;

	/*
	 * VP_CONFIG: error gain is not set here, it will be updated
	 * on each scale, based on OPP.
	 */
	val = (voltdm->pmic->vp_erroroffset <<
	       __ffs(voltdm->vp->common->vpconfig_erroroffset_mask)) |
		vp->common->vpconfig_timeouten;
	voltdm->write(val, vp->vpconfig);

	/* VSTEPMIN */
	val = (waittime << vp->common->vstepmin_smpswaittimemin_shift) |
		(vstepmin << vp->common->vstepmin_stepmin_shift);
	voltdm->write(val, vp->vstepmin);

	/* VSTEPMAX */
	val = (vstepmax << vp->common->vstepmax_stepmax_shift) |
		(waittime << vp->common->vstepmax_smpswaittimemax_shift);
	voltdm->write(val, vp->vstepmax);

	/* VLIMITTO */
	val = (vddmax << vp->common->vlimitto_vddmax_shift) |
		(vddmin << vp->common->vlimitto_vddmin_shift) |
		(timeout << vp->common->vlimitto_timeout_shift);
	voltdm->write(val, vp->vlimitto);
}

/*
 * Update the VP errorgain field for @target_volt using the per-OPP
 * value from the voltage table. Returns 0 on success, -EINVAL if the
 * domain has no VP or no matching voltage-table entry.
 */
int omap_vp_update_errorgain(struct voltagedomain *voltdm,
			     unsigned long target_volt)
{
	struct omap_volt_data *volt_data;

	if (!voltdm->vp)
		return -EINVAL;

	/* Get volt_data corresponding to target_volt */
	volt_data = omap_voltage_get_voltdata(voltdm, target_volt);
	if (IS_ERR(volt_data))
		return -EINVAL;

	/* Setting vp errorgain based on the voltage */
	voltdm->rmw(voltdm->vp->common->vpconfig_errorgain_mask,
		    volt_data->vp_errgain <<
		    __ffs(voltdm->vp->common->vpconfig_errorgain_mask),
		    voltdm->vp->vpconfig);

	return 0;
}

/* VP force update method of voltage scaling */
int omap_vp_forceupdate_scale(struct voltagedomain *voltdm,
			      unsigned long target_volt)
{
	struct omap_vp_instance *vp = voltdm->vp;
	u32 vpconfig;
	u8 target_vsel, current_vsel;
	int ret, timeout = 0;

	ret = omap_vc_pre_scale(voltdm, target_volt, &target_vsel,
				&current_vsel);
	if (ret)
		return ret;

	/*
	 * Clear all pending TransactionDone interrupt/status. Typical latency
	 * is <3us
	 */
	while (timeout++ < VP_TRANXDONE_TIMEOUT) {
		vp->common->ops->clear_txdone(vp->id);
		if (!vp->common->ops->check_txdone(vp->id))
			break;
		udelay(1);
	}
	if (timeout >= VP_TRANXDONE_TIMEOUT) {
		pr_warn("%s: vdd_%s TRANXDONE timeout exceeded. Voltage change aborted\n",
			__func__, voltdm->name);
		return -ETIMEDOUT;
	}

	vpconfig = _vp_set_init_voltage(voltdm, target_volt);

	/* Force update of voltage */
	voltdm->write(vpconfig | vp->common->vpconfig_forceupdate,
		      voltdm->vp->vpconfig);

	/*
	 * Wait for TransactionDone. Typical latency is <200us.
	 * Depends on SMPSWAITTIMEMIN/MAX and voltage change
	 */
	timeout = 0;
	omap_test_timeout(vp->common->ops->check_txdone(vp->id),
			  VP_TRANXDONE_TIMEOUT, timeout);
	if (timeout >= VP_TRANXDONE_TIMEOUT)
		pr_err("%s: vdd_%s TRANXDONE timeout exceeded. TRANXDONE never got set after the voltage update\n",
		       __func__, voltdm->name);

	omap_vc_post_scale(voltdm, target_volt, target_vsel, current_vsel);

	/*
	 * Disable TransactionDone interrupt , clear all status, clear
	 * control registers
	 */
	timeout = 0;
	while (timeout++ < VP_TRANXDONE_TIMEOUT) {
		vp->common->ops->clear_txdone(vp->id);
		if (!vp->common->ops->check_txdone(vp->id))
			break;
		udelay(1);
	}

	if (timeout >= VP_TRANXDONE_TIMEOUT)
		pr_warn("%s: vdd_%s TRANXDONE timeout exceeded while trying to clear the TRANXDONE status\n",
			__func__, voltdm->name);

	/* Clear force bit */
	voltdm->write(vpconfig, vp->vpconfig);

	return 0;
}

/**
 * omap_vp_enable() - API to enable a particular VP
 * @voltdm:	pointer to the VDD whose VP is to be enabled.
 *
 * This API enables a particular voltage processor. Needed by the smartreflex
 * class drivers.
 */
void omap_vp_enable(struct voltagedomain *voltdm)
{
	struct omap_vp_instance *vp;
	u32 vpconfig, volt;

	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return;
	}

	vp = voltdm->vp;
	if (!voltdm->read || !voltdm->write) {
		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
		       __func__, voltdm->name);
		return;
	}

	/* If VP is already enabled, do nothing. Return */
	if (vp->enabled)
		return;

	volt = voltdm_get_voltage(voltdm);
	if (!volt) {
		pr_warn("%s: unable to find current voltage for %s\n",
			__func__, voltdm->name);
		return;
	}

	vpconfig = _vp_set_init_voltage(voltdm, volt);

	/* Enable VP */
	vpconfig |= vp->common->vpconfig_vpenable;
	voltdm->write(vpconfig, vp->vpconfig);

	vp->enabled = true;
}

/**
 * omap_vp_disable() - API to disable a particular VP
 * @voltdm:	pointer to the VDD whose VP is to be disabled.
 *
 * This API disables a particular voltage processor. Needed by the smartreflex
 * class drivers.
 */
void omap_vp_disable(struct voltagedomain *voltdm)
{
	struct omap_vp_instance *vp;
	u32 vpconfig;
	/* written (not read) by the omap_test_timeout() macro below */
	int timeout;

	if (!voltdm || IS_ERR(voltdm)) {
		pr_warn("%s: VDD specified does not exist!\n", __func__);
		return;
	}

	vp = voltdm->vp;
	if (!voltdm->read || !voltdm->write) {
		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
		       __func__, voltdm->name);
		return;
	}

	/* If VP is already disabled, do nothing. Return */
	if (!vp->enabled) {
		pr_warn("%s: Trying to disable VP for vdd_%s when it is already disabled\n",
			__func__, voltdm->name);
		return;
	}

	/* Disable VP */
	vpconfig = voltdm->read(vp->vpconfig);
	vpconfig &= ~vp->common->vpconfig_vpenable;
	voltdm->write(vpconfig, vp->vpconfig);

	/*
	 * Wait for VP idle Typical latency is <2us. Maximum latency is ~100us
	 */
	omap_test_timeout((voltdm->read(vp->vstatus)),
			  VP_IDLE_TIMEOUT, timeout);

	if (timeout >= VP_IDLE_TIMEOUT)
		pr_warn("%s: vdd_%s idle timedout\n", __func__, voltdm->name);

	vp->enabled = false;

	return;
}
linux-master
arch/arm/mach-omap2/vp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <[email protected]>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <[email protected]>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <[email protected]>
 *
 * Based on pm.c for omap1
 */

#include <linux/cpu_pm.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/cpuidle.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "omap-secure.h"
#include "sram.h"
#include "control.h"
#include "vc.h"

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

/* Per-powerdomain bookkeeping for suspend target/saved states */
struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

/* SRAM-resident copy of omap3_do_wfi(), installed by omap_push_sram_idle() */
void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;

/* Save CORE-domain context (padconf, INTC, control module) before OFF */
static void omap3_core_save_context(void)
{
	omap3_ctrl_save_padconf();

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
			 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the system control module context, padconf already save above*/
	omap3_control_save_context();
}

/* Restore CORE-domain context after wake from OFF */
static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(void)
{
	u32 ret;
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = omap3_save_secure_ram(omap3_secure_ram_storage,
					    OMAP3_SAVE_SECURE_RAM_SZ);
		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
		/* Following is for error tracking, it should not happen */
		if (ret) {
			pr_err("save_secure_sram() returns %08x\n", ret);
			while (1)
				;
		}
	}
}

/* PRCM IRQ handler for I/O pad and I/O chain wakeup events only */
static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
	int c;

	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK);

	return c ? IRQ_HANDLED : IRQ_NONE;
}

/* PRCM IRQ handler for all non-I/O wakeup events */
static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
	int c;

	/*
	 * Clear all except ST_IO and ST_IO_CHAIN for wkup module,
	 * these are handled in a separate handler to avoid acking
	 * IO events before parsing in mux code
	 */
	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
						   OMAP3430_ST_IO_CHAIN_MASK));
	c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
	c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
		c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
	}

	return c ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * Save CP15 auxiliary control registers into @save as (valid, value)
 * pairs for the resume path to reload.
 */
static void omap34xx_save_context(u32 *save)
{
	u32 val;

	/* Read Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	*save++ = 1;
	*save++ = val;

	/* Read L2 AUX ctrl register */
	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
	*save++ = 1;
	*save++ = val;
}

/* cpu_suspend() callback: execute the SRAM/ROM low-power entry sequence */
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}

__cpuidle void omap_sram_idle(bool rcuidle)
{
	/* Variable to tell what needs to be saved and restored
	 * in omap_sram_idle*/
	/* save_state = 0 => Nothing to save and restored */
	/* save_state = 1 => Only L1 and logic lost */
	/* save_state = 2 => Only L2 lost */
	/* save_state = 3 => L1, L2 and logic lost */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	u32 sdrc_pwr = 0;
	int error;

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		pr_err("Invalid mpu state in sram_idle\n");
		return;
	}

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);

	pwrdm_pre_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF) {
		error = cpu_cluster_pm_enter();
		if (error)
			return;
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	/* Configure PMIC signaling for I2C4 or sys_off_mode */
	omap3_vc_set_pmic_signaling(core_next_state);

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices ROM code restores a SRDC value
	 * from scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where some ARM context
	 * get saved. The rest is placed on the stack, and restored
	 * from there before resuming.
	 */
	if (save_state)
		omap34xx_save_context(omap3_arm_context);

	if (rcuidle)
		ct_cpuidle_enter();

	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);

	if (rcuidle)
		ct_cpuidle_exit();

	/* Restore normal SDRC POWER settings */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON &&
	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
		omap3_core_restore_context();
		omap3_cm_restore_context();
		omap3_sram_restore_context();
		omap2_sms_restore_context();
	} else {
		/*
		 * In off-mode resume path above, omap3_core_restore_context
		 * also handles the INTC autoidle restore done here so limit
		 * this to non-off mode resume paths so we don't do it twice.
		 */
		omap3_intc_resume_idle();
	}

	pwrdm_post_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF)
		cpu_cluster_pm_exit();
}

/* arch_idle hook: enter WFI unless an interrupt is already pending */
static void omap3_pm_idle(void)
{
	if (omap_irq_pending())
		return;

	omap3_do_wfi();
}

#ifdef CONFIG_SUSPEND
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);

	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap3_intc_suspend();

	omap_sram_idle(false);

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_err("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return ret;
}
#else
#define omap3_pm_suspend NULL
#endif /* CONFIG_SUSPEND */

static void __init prcm_setup_regs(void)
{
	omap3_ctrl_init();

	omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva());
}

void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
		    pwrst->pwrdm == core_pwrdm &&
		    state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			pr_warn("%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

/* Return @pwrdm's suspend target state, or -EINVAL if not tracked */
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

/* Set @pwrdm's suspend target state; -EINVAL if the domain is not tracked */
int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

/* pwrdm_for_each() callback: register a powerdomain and set initial state */
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;

	pwrst->pwrdm = pwrdm;

	if (enable_off_mode)
		pwrst->next_state = PWRDM_POWER_OFF;
	else
		pwrst->next_state = PWRDM_POWER_RET;

	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for erratum i581 WA,
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
}

static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the l2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
					  PM_PER_MEMORIES_ERRATUM_i582);
	} else if (cpu_is_omap34xx()) {
		pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
	}
}

/* Enable off-mode only if the DT advertises a capable TWL4030 PMIC config */
static void __init omap3_pm_check_pmic(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,twl4030-power-idle");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "ti,twl4030-power-idle-osc-off");

	if (np) {
		of_node_put(np);
		enable_off_mode = 1;
	} else {
		enable_off_mode = 0;
	}
}

int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
	int ret;

	if (!omap3_has_io_chain_ctrl())
		pr_warn("PM: no software I/O chain control; some wakeups may be lost\n");

	pm_errata_configure();

	/* XXX prcm_setup_regs needs to be before enabling hw
	 * supervised mode for powerdomains */
	prcm_setup_regs();

	ret = request_irq(omap_prcm_event_to_irq("wkup"),
			  _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);

	if (ret) {
		pr_err("pm: Failed to request pm_wkup irq\n");
		goto err1;
	}

	/* IO interrupt is shared with mux code */
	ret = request_irq(omap_prcm_event_to_irq("io"),
			  _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
			  omap3_pm_init);

	if (ret) {
		pr_err("pm: Failed to request pm_io irq\n");
		goto err2;
	}

	omap3_pm_check_pmic();

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		goto err3;
	}

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		pr_err("Failed to get mpu_pwrdm\n");
		ret = -EINVAL;
		goto err3;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	wkup_clkdm = clkdm_lookup("wkup_clkdm");

	omap_common_suspend_init(omap3_pm_suspend);

	arm_pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608
	 * it is safer to disable RTA by the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	/*
	 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
	 * not correctly reset when the PER powerdomain comes back
	 * from OFF or OSWR when the CORE powerdomain is kept active.
	 * See OMAP36xx Erratum i582 "PER Domain reset issue after
	 * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
	 * complete workaround. The kernel must also prevent the PER
	 * powerdomain from going to OSWR/OFF while the CORE
	 * powerdomain is not going to OSWR/OFF. And if PER last
	 * power state was off while CORE last power state was ON, the
	 * UART3/4 and McBSP2/3 SIDETONE devices need to run a
	 * self-test using their loopback tests; if that fails, those
	 * devices are unusable until the PER/CORE can complete a transition
	 * from ON to OSWR/OFF and then back to ON.
	 *
	 * XXX Technically this workaround is only needed if off-mode
	 * or OSWR is enabled.
	 */
	if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
		clkdm_add_wkdep(per_clkdm, wkup_clkdm);

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			pr_err("Memory allocation failed when allocating for secure sram context\n");

		local_irq_disable();

		omap3_save_secure_ram_context();

		local_irq_enable();
	}

	omap3_save_scratchpad_contents();
	return ret;

err3:
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
err2:
	free_irq(omap_prcm_event_to_irq("wkup"), NULL);
err1:
	return ret;
}
linux-master
arch/arm/mach-omap2/pm34xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2xxx CM module functions
 *
 * Copyright (C) 2009 Nokia Corporation
 * Copyright (C) 2008-2010, 2012 Texas Instruments, Inc.
 * Paul Walmsley
 * Rajendra Nayak <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>

#include "prm2xxx.h"
#include "cm.h"
#include "cm2xxx.h"
#include "cm-regbits-24xx.h"
#include "clockdomain.h"

/* CM_AUTOIDLE_PLL.AUTO_* bit values for DPLLs */
#define DPLL_AUTOIDLE_DISABLE				0x0
#define OMAP2XXX_DPLL_AUTOIDLE_LOW_POWER_STOP		0x3

/* CM_AUTOIDLE_PLL.AUTO_* bit values for APLLs (OMAP2xxx only) */
#define OMAP2XXX_APLL_AUTOIDLE_DISABLE			0x0
#define OMAP2XXX_APLL_AUTOIDLE_LOW_POWER_STOP		0x3

/* CM_IDLEST_PLL bit value offset for APLLs (OMAP2xxx only) */
#define EN_APLL_LOCKED					3

/* Register offsets of CM_IDLEST1..4, indexed by (register ID - 1) */
static const u8 omap2xxx_cm_idlest_offs[] = {
	CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3, OMAP24XX_CM_IDLEST4
};

/*
 * Low-level CM_CLKSTCTRL field accessors
 */

/* Read-modify-write the field selected by @mask to value @c */
static void _write_clktrctrl(u8 c, s16 module, u32 mask)
{
	u32 v;

	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
	v &= ~mask;
	v |= c << __ffs(mask);
	omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL);
}

/* Return true if the clockdomain field @mask is in hardware-supervised mode */
static bool omap2xxx_cm_is_clkdm_in_hwsup(s16 module, u32 mask)
{
	u32 v;

	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
	v &= mask;
	v >>= __ffs(mask);

	return (v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO) ? 1 : 0;
}

static void omap2xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP24XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
}

static void omap2xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP24XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
}

/*
 * DPLL autoidle control
 */

static void _omap2xxx_set_dpll_autoidle(u8 m)
{
	u32 v;

	v = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
	v &= ~OMAP24XX_AUTO_DPLL_MASK;
	v |= m << OMAP24XX_AUTO_DPLL_SHIFT;
	omap2_cm_write_mod_reg(v, PLL_MOD, CM_AUTOIDLE);
}

/*
 * NOTE(review): the values used by the following two helpers look
 * swapped relative to their names (set_dpll_disable_autoidle writes
 * LOW_POWER_STOP; set_dpll_auto_low_power_stop writes DISABLE).
 * Confirm against callers and the 24xx TRM before changing.
 */
void omap2xxx_cm_set_dpll_disable_autoidle(void)
{
	_omap2xxx_set_dpll_autoidle(OMAP2XXX_DPLL_AUTOIDLE_LOW_POWER_STOP);
}

void omap2xxx_cm_set_dpll_auto_low_power_stop(void)
{
	_omap2xxx_set_dpll_autoidle(DPLL_AUTOIDLE_DISABLE);
}

/**
 * omap2xxx_cm_split_idlest_reg - split CM_IDLEST reg addr into its components
 * @idlest_reg: CM_IDLEST* virtual address
 * @prcm_inst: pointer to an s16 to return the PRCM instance offset
 * @idlest_reg_id: pointer to a u8 to return the CM_IDLESTx register ID
 *
 * Returns 0 on success, or -EINVAL if the register offset does not
 * correspond to any known CM_IDLESTx register.
 *
 * XXX This function is only needed until absolute register addresses are
 * removed from the OMAP struct clk records.
 */
static int omap2xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg,
					s16 *prcm_inst,
					u8 *idlest_reg_id)
{
	unsigned long offs;
	u8 idlest_offs;
	int i;

	idlest_offs = idlest_reg->offset & 0xff;
	for (i = 0; i < ARRAY_SIZE(omap2xxx_cm_idlest_offs); i++) {
		if (idlest_offs == omap2xxx_cm_idlest_offs[i]) {
			*idlest_reg_id = i + 1;
			break;
		}
	}

	if (i == ARRAY_SIZE(omap2xxx_cm_idlest_offs))
		return -EINVAL;

	/* The PRCM instance offset lives in the high byte of the offset */
	offs = idlest_reg->offset;
	offs &= 0xff00;
	*prcm_inst = offs;

	return 0;
}

/*
 * Module IDLEST polling
 */

/**
 * omap2xxx_cm_wait_module_ready - wait for a module to leave idle or standby
 * @part: PRCM partition, ignored for OMAP2
 * @prcm_mod: PRCM module offset
 * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Wait for the PRCM to indicate that the module identified by
 * (@prcm_mod, @idlest_id, @idlest_shift) is clocked. Return 0 upon
 * success or -EBUSY if the module doesn't enable in time.
 */
static int omap2xxx_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_id,
					 u8 idlest_shift)
{
	int ena = 0, i = 0;
	u8 cm_idlest_reg;
	u32 mask;

	if (!idlest_id || (idlest_id > ARRAY_SIZE(omap2xxx_cm_idlest_offs)))
		return -EINVAL;

	cm_idlest_reg = omap2xxx_cm_idlest_offs[idlest_id - 1];

	mask = 1 << idlest_shift;
	ena = mask;

	omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, cm_idlest_reg) &
			    mask) == ena), MAX_MODULE_READY_TIME, i);

	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}

/* Clockdomain low-level functions */

static void omap2xxx_clkdm_allow_idle(struct clockdomain *clkdm)
{
	omap2xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
				       clkdm->clktrctrl_mask);
}

static void omap2xxx_clkdm_deny_idle(struct clockdomain *clkdm)
{
	omap2xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					clkdm->clktrctrl_mask);
}

static int omap2xxx_clkdm_clk_enable(struct clockdomain *clkdm)
{
	bool hwsup = false;

	if (!clkdm->clktrctrl_mask)
		return 0;

	hwsup = omap2xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					      clkdm->clktrctrl_mask);

	/* In swsup mode, force the clockdomain awake before clock enable */
	if (!hwsup && clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
		omap2xxx_clkdm_wakeup(clkdm);

	return 0;
}

static int omap2xxx_clkdm_clk_disable(struct clockdomain *clkdm)
{
	bool hwsup = false;

	if (!clkdm->clktrctrl_mask)
		return 0;

	hwsup = omap2xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					      clkdm->clktrctrl_mask);

	/* In swsup mode, force the clockdomain to sleep after clock disable */
	if (!hwsup && clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
		omap2xxx_clkdm_sleep(clkdm);

	return 0;
}

struct clkdm_ops omap2_clkdm_operations = {
	.clkdm_add_wkdep	= omap2_clkdm_add_wkdep,
	.clkdm_del_wkdep	= omap2_clkdm_del_wkdep,
	.clkdm_read_wkdep	= omap2_clkdm_read_wkdep,
	.clkdm_clear_all_wkdeps	= omap2_clkdm_clear_all_wkdeps,
	.clkdm_sleep		= omap2xxx_clkdm_sleep,
	.clkdm_wakeup		= omap2xxx_clkdm_wakeup,
	.clkdm_allow_idle	= omap2xxx_clkdm_allow_idle,
	.clkdm_deny_idle	= omap2xxx_clkdm_deny_idle,
	.clkdm_clk_enable	= omap2xxx_clkdm_clk_enable,
	.clkdm_clk_disable	= omap2xxx_clkdm_clk_disable,
};

/* Return 1 if any CORE functional clock is enabled, else 0 */
int omap2xxx_cm_fclks_active(void)
{
	u32 f1, f2;

	f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);

	return (f1 | f2) ? 1 : 0;
}

/* Return 1 if no retention-blocking CORE clocks are active, else 0 */
int omap2xxx_cm_mpu_retention_allowed(void)
{
	u32 l;

	/* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
	l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
		 OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
		 OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
		return 0;
	/* Check for UART3. */
	l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
	if (l & OMAP24XX_EN_UART3_MASK)
		return 0;

	return 1;
}

/* Return the CORE_CLK source field from CM_CLKSEL2_PLL */
u32 omap2xxx_cm_get_core_clk_src(void)
{
	u32 v;

	v = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
	v &= OMAP24XX_CORE_CLK_SRC_MASK;

	return v;
}

/* Return the raw CM_CLKSEL2_PLL register value */
u32 omap2xxx_cm_get_core_pll_config(void)
{
	return omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
}

/*
 * Program the per-module clock dividers; the DSS2 clksel field of
 * CM_CLKSEL1_CORE is preserved across the write. @mdm applies to
 * OMAP2430 only and is skipped when zero.
 */
void omap2xxx_cm_set_mod_dividers(u32 mpu, u32 dsp, u32 gfx, u32 core,
				  u32 mdm)
{
	u32 tmp;

	omap2_cm_write_mod_reg(mpu, MPU_MOD, CM_CLKSEL);
	omap2_cm_write_mod_reg(dsp, OMAP24XX_DSP_MOD, CM_CLKSEL);
	omap2_cm_write_mod_reg(gfx, GFX_MOD, CM_CLKSEL);
	tmp = omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) &
		OMAP24XX_CLKSEL_DSS2_MASK;
	omap2_cm_write_mod_reg(core | tmp, CORE_MOD, CM_CLKSEL1);
	if (mdm)
		omap2_cm_write_mod_reg(mdm, OMAP2430_MDM_MOD, CM_CLKSEL);
}

/*
 * CM driver registration
 */

static const struct cm_ll_data omap2xxx_cm_ll_data = {
	.split_idlest_reg	= &omap2xxx_cm_split_idlest_reg,
	.wait_module_ready	= &omap2xxx_cm_wait_module_ready,
};

int __init omap2xxx_cm_init(const struct omap_prcm_init_data *data)
{
	return cm_register(&omap2xxx_cm_ll_data);
}

static void __exit omap2xxx_cm_exit(void)
{
	cm_unregister(&omap2xxx_cm_ll_data);
}
__exitcall(omap2xxx_cm_exit);
linux-master
arch/arm/mach-omap2/cm2xxx.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> #include <linux/reboot.h> #include "iomap.h" #include "common.h" #include "control.h" #include "prm3xxx.h" #define TI81XX_PRM_DEVICE_RSTCTRL 0x00a0 #define TI81XX_GLOBAL_RST_COLD BIT(1) /** * ti81xx_restart - trigger a software restart of the SoC * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c * @cmd: passed from the userspace program rebooting the system (if provided) * * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. * * NOTE: Warm reset does not seem to work, may require resetting * clocks to bypass mode. */ void ti81xx_restart(enum reboot_mode mode, const char *cmd) { omap2_prm_set_mod_reg_bits(TI81XX_GLOBAL_RST_COLD, 0, TI81XX_PRM_DEVICE_RSTCTRL); while (1) ; }
linux-master
arch/arm/mach-omap2/ti81xx-restart.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file configures the internal USB PHY in OMAP4430. Used * with TWL6030 transceiver and MUSB on OMAP4430. * * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com * Author: Hema HK <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/err.h> #include <linux/usb.h> #include <linux/usb/musb.h> #include "soc.h" #include "control.h" #define CONTROL_DEV_CONF 0x300 #define PHY_PD 0x1 /** * omap4430_phy_power_down: disable MUSB PHY during early init * * OMAP4 MUSB PHY module is enabled by default on reset, but this will * prevent core retention if not disabled by SW. USB driver will * later on enable this, once and if the driver needs it. */ static int __init omap4430_phy_power_down(void) { void __iomem *ctrl_base; if (!cpu_is_omap44xx()) return 0; ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K); if (!ctrl_base) { pr_err("control module ioremap failed\n"); return -ENOMEM; } /* Power down the phy */ writel_relaxed(PHY_PD, ctrl_base + CONTROL_DEV_CONF); iounmap(ctrl_base); return 0; } omap_early_initcall(omap4430_phy_power_down);
linux-master
arch/arm/mach-omap2/omap_phy_internal.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3 voltage domain data * * Copyright (C) 2007, 2010 Texas Instruments, Inc. * Rajendra Nayak <[email protected]> * Lesly A M <[email protected]> * Thara Gopinath <[email protected]> * * Copyright (C) 2008, 2011 Nokia Corporation * Kalle Jokiniemi * Paul Walmsley */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/init.h> #include "soc.h" #include "common.h" #include "prm-regbits-34xx.h" #include "omap_opp_data.h" #include "voltage.h" #include "vc.h" #include "vp.h" /* * VDD data */ /* OMAP3-common voltagedomain data */ static struct voltagedomain omap3_voltdm_wkup = { .name = "wakeup", }; /* 34xx/36xx voltagedomain data */ static const struct omap_vfsm_instance omap3_vdd1_vfsm = { .voltsetup_reg = OMAP3_PRM_VOLTSETUP1_OFFSET, .voltsetup_mask = OMAP3430_SETUP_TIME1_MASK, }; static const struct omap_vfsm_instance omap3_vdd2_vfsm = { .voltsetup_reg = OMAP3_PRM_VOLTSETUP1_OFFSET, .voltsetup_mask = OMAP3430_SETUP_TIME2_MASK, }; static struct voltagedomain omap3_voltdm_mpu = { .name = "mpu_iva", .scalable = true, .read = omap3_prm_vcvp_read, .write = omap3_prm_vcvp_write, .rmw = omap3_prm_vcvp_rmw, .vc = &omap3_vc_mpu, .vfsm = &omap3_vdd1_vfsm, .vp = &omap3_vp_mpu, }; static struct voltagedomain omap3_voltdm_core = { .name = "core", .scalable = true, .read = omap3_prm_vcvp_read, .write = omap3_prm_vcvp_write, .rmw = omap3_prm_vcvp_rmw, .vc = &omap3_vc_core, .vfsm = &omap3_vdd2_vfsm, .vp = &omap3_vp_core, }; static struct voltagedomain *voltagedomains_omap3[] __initdata = { &omap3_voltdm_mpu, &omap3_voltdm_core, &omap3_voltdm_wkup, NULL, }; /* AM35xx voltagedomain data */ static struct voltagedomain am35xx_voltdm_mpu = { .name = "mpu_iva", }; static struct voltagedomain am35xx_voltdm_core = { .name = "core", }; static struct voltagedomain *voltagedomains_am35xx[] __initdata = { &am35xx_voltdm_mpu, &am35xx_voltdm_core, &omap3_voltdm_wkup, NULL, }; static const char *const sys_clk_name __initconst = "sys_ck"; 
/**
 * omap3xxx_voltagedomains_init - register the OMAP3/AM35xx voltage domains
 *
 * Picks the voltage-domain list matching the running SoC (AM35xx vs.
 * 34xx/36xx), fills in OPP voltage tables and VC/VP parameters where
 * CONFIG_PM_OPP is enabled, assigns the common sys_clk name to every
 * domain, and hands the list to voltdm_init().  No return value.
 */
void __init omap3xxx_voltagedomains_init(void)
{
	struct voltagedomain **voltdms;
	int i;

	/*
	 * XXX Will depend on the process, validation, and binning
	 * for the currently-running IC
	 */
#ifdef CONFIG_PM_OPP
	/* 36xx and 34xx parts ship with different OPP voltage tables */
	if (cpu_is_omap3630()) {
		omap3_voltdm_mpu.volt_data = omap36xx_vddmpu_volt_data;
		omap3_voltdm_core.volt_data = omap36xx_vddcore_volt_data;
	} else {
		omap3_voltdm_mpu.volt_data = omap34xx_vddmpu_volt_data;
		omap3_voltdm_core.volt_data = omap34xx_vddcore_volt_data;
	}
#endif

	omap3_voltdm_mpu.vp_param = &omap3_mpu_vp_data;
	omap3_voltdm_core.vp_param = &omap3_core_vp_data;
	omap3_voltdm_mpu.vc_param = &omap3_mpu_vc_data;
	omap3_voltdm_core.vc_param = &omap3_core_vc_data;

	/* AM35xx domains are fixed-voltage, so they use a separate list */
	if (soc_is_am35xx())
		voltdms = voltagedomains_am35xx;
	else
		voltdms = voltagedomains_omap3;

	/* Walk the NULL-terminated list, assigning the sys_clk name */
	for (i = 0; voltdms[i] != NULL; i++)
		voltdms[i]->sys_clk.name = sys_clk_name;

	voltdm_init(voltdms);
}
linux-master
arch/arm/mach-omap2/voltagedomains3xxx_data.c
// SPDX-License-Identifier: GPL-2.0
/*
 * opp2430_data.c - old-style "OPP" table for OMAP2430
 *
 * Copyright (C) 2005-2009 Texas Instruments, Inc.
 * Copyright (C) 2004-2009 Nokia Corporation
 *
 * Richard Woodruff <[email protected]>
 *
 * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
 * These configurations are characterized by voltage and speed for clocks.
 * The device is only validated for certain combinations. One way to express
 * these combinations is via the 'ratios' which the clocks operate with
 * respect to each other. These ratio sets are for a given voltage/DPLL
 * setting. All configurations can be described by a DPLL setting and a ratio.
 *
 * 2430 differs from 2420 in that there are no more phase synchronizers used.
 * They both have a slightly different clock domain setup. 2420(iva1,dsp) vs
 * 2430 (iva2.1, NOdsp, mdm)
 *
 * XXX Missing voltage data.
 * XXX Missing 19.2MHz sys_clk rate sets.
 *
 * The format described in this file is deprecated. Once a reasonable
 * OPP API exists, the data in this file should be converted to use it.
 *
 * This is technically part of the OMAP2xxx clock code.
 */

#include <linux/kernel.h>

#include "opp2xxx.h"
#include "sdrc.h"
#include "clock.h"

/*
 * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
 * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
 * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
 * CM_CLKSEL2_PLL, CM_CLKSEL_MDM
 *
 * Filling in table based on 2430-SDPs variants available. There are
 * quite a few more rate combinations which could be defined.
 *
 * When multiple values are defined the start up will try and choose
 * the fastest one. If a 'fast' value is defined, then automatically,
 * the /2 one should be included as it can be used. Generally having
 * more than one fast set does not make sense, as static timings need
 * to be changed to change the set. The exception is the bypass
 * setting which is available for low power bypass.
 *
 * Note: This table needs to be sorted, fastest to slowest.
 */
const struct prcm_config omap2430_rate_table[] = {
	/* PRCM #4 - ratio2 (ES2.1) - FAST */
	{S13M, S798M, S399M, R2_CM_CLKSEL_MPU_VAL,		/* 399MHz ARM */
	 R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
	 R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_2x_VAL, R2_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_133MHz,
	 RATE_IN_243X},

	/* PRCM #2 - ratio1 (ES2) - FAST */
	{S13M, S658M, S329M, R1_CM_CLKSEL_MPU_VAL,		/* 330MHz ARM */
	 R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
	 R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_165MHz,
	 RATE_IN_243X},

	/* PRCM #5a - ratio1 - FAST */
	{S13M, S532M, S266M, R1_CM_CLKSEL_MPU_VAL,		/* 266MHz ARM */
	 R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
	 R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_133MHz,
	 RATE_IN_243X},

	/* PRCM #5b - ratio1 - FAST */
	{S13M, S400M, S200M, R1_CM_CLKSEL_MPU_VAL,		/* 200MHz ARM */
	 R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
	 R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_100MHz,
	 RATE_IN_243X},

	/* PRCM #4 - ratio1 (ES2.1) - SLOW */
	{S13M, S399M, S199M, R2_CM_CLKSEL_MPU_VAL,		/* 200MHz ARM */
	 R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
	 R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_1x_VAL, R2_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_133MHz,
	 RATE_IN_243X},

	/* PRCM #2 - ratio1 (ES2) - SLOW */
	{S13M, S329M, S164M, R1_CM_CLKSEL_MPU_VAL,		/* 165MHz ARM */
	 R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
	 R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_165MHz,
	 RATE_IN_243X},

	/* PRCM #5a - ratio1 - SLOW */
	{S13M, S266M, S133M, R1_CM_CLKSEL_MPU_VAL,		/* 133MHz ARM */
	 R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
	 R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_133MHz,
	 RATE_IN_243X},

	/* PRCM #5b - ratio1 - SLOW*/
	{S13M, S200M, S100M, R1_CM_CLKSEL_MPU_VAL,		/* 100MHz ARM */
	 R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
	 R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_100MHz,
	 RATE_IN_243X},

	/* PRCM-boot/bypass */
	{S13M, S13M, S13M, RB_CM_CLKSEL_MPU_VAL,		/* 13MHz */
	 RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
	 RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_13_VAL,
	 MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_BYPASS,
	 RATE_IN_243X},

	/* PRCM-boot/bypass */
	{S12M, S12M, S12M, RB_CM_CLKSEL_MPU_VAL,		/* 12MHz */
	 RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
	 RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_12_VAL,
	 MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
	 SDRC_RFR_CTRL_BYPASS,
	 RATE_IN_243X},

	/* All-zero terminator entry */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};
linux-master
arch/arm/mach-omap2/opp2430_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap_hwmod_2xxx_ipblock_data.c - common IP block data for OMAP2xxx
 *
 * Copyright (C) 2011 Nokia Corporation
 * Paul Walmsley
 */

#include <linux/types.h>

#include "omap_hwmod.h"
#include "omap_hwmod_common_data.h"
#include "cm-regbits-24xx.h"
#include "prm-regbits-24xx.h"
#include "wd_timer.h"

/*
 * 'dispc' class
 * display controller
 */

static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2_dispc_hwmod_class = {
	.name	= "dispc",
	.sysc	= &omap2_dispc_sysc,
};

/* OMAP2xxx Timer Common */
static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2xxx_timer_hwmod_class = {
	.name	= "timer",
	.sysc	= &omap2xxx_timer_sysc,
};

/*
 * 'wd_timer' class
 * 32-bit watchdog upward counter that generates a pulse on the reset pin on
 * overflow condition
 */

static struct omap_hwmod_class_sysconfig omap2xxx_wd_timer_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2xxx_wd_timer_hwmod_class = {
	.name		= "wd_timer",
	.sysc		= &omap2xxx_wd_timer_sysc,
	.pre_shutdown	= &omap2_wd_timer_disable,
	.reset		= &omap2_wd_timer_reset,
};

/*
 * 'gpio' class
 * general purpose io module
 */
static struct omap_hwmod_class_sysconfig omap2xxx_gpio_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

struct omap_hwmod_class omap2xxx_gpio_hwmod_class = {
	.name = "gpio",
	.sysc = &omap2xxx_gpio_sysc,
};

/*
 * 'mailbox' class
 * mailbox module allowing communication between the on-chip processors
 * using a queued mailbox-interrupt mechanism.
 */

static struct omap_hwmod_class_sysconfig omap2xxx_mailbox_sysc = {
	.rev_offs	= 0x000,
	.sysc_offs	= 0x010,
	.syss_offs	= 0x014,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

struct omap_hwmod_class omap2xxx_mailbox_hwmod_class = {
	.name	= "mailbox",
	.sysc	= &omap2xxx_mailbox_sysc,
};

/*
 * 'mcspi' class
 * multichannel serial port interface (mcspi) / master/slave synchronous serial
 * bus
 */

static struct omap_hwmod_class_sysconfig omap2xxx_mcspi_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

struct omap_hwmod_class omap2xxx_mcspi_class = {
	.name	= "mcspi",
	.sysc	= &omap2xxx_mcspi_sysc,
};

/*
 * 'gpmc' class
 * general purpose memory controller
 */

static struct omap_hwmod_class_sysconfig omap2xxx_gpmc_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2xxx_gpmc_hwmod_class = {
	.name	= "gpmc",
	.sysc	= &omap2xxx_gpmc_sysc,
};

/*
 * IP blocks
 */

/* L3 */
struct omap_hwmod omap2xxx_l3_main_hwmod = {
	.name		= "l3_main",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

/* L4 CORE */
struct omap_hwmod omap2xxx_l4_core_hwmod = {
	.name		= "l4_core",
	.class		= &l4_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

/* L4 WKUP */
struct omap_hwmod omap2xxx_l4_wkup_hwmod = {
	.name		= "l4_wkup",
	.class		= &l4_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

/* MPU */
struct omap_hwmod omap2xxx_mpu_hwmod = {
	.name		= "mpu",
	.class		= &mpu_hwmod_class,
	.main_clk	= "mpu_ck",
};

/* timer3 */
struct omap_hwmod omap2xxx_timer3_hwmod = {
	.name		= "timer3",
	.main_clk	= "gpt3_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT3_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer4 */
struct omap_hwmod omap2xxx_timer4_hwmod = {
	.name		= "timer4",
	.main_clk	= "gpt4_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT4_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer5 */
struct omap_hwmod omap2xxx_timer5_hwmod = {
	.name		= "timer5",
	.main_clk	= "gpt5_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT5_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer6 */
struct omap_hwmod omap2xxx_timer6_hwmod = {
	.name		= "timer6",
	.main_clk	= "gpt6_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT6_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer7 */
struct omap_hwmod omap2xxx_timer7_hwmod = {
	.name		= "timer7",
	.main_clk	= "gpt7_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT7_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer8 */
struct omap_hwmod omap2xxx_timer8_hwmod = {
	.name		= "timer8",
	.main_clk	= "gpt8_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT8_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer9 */
struct omap_hwmod omap2xxx_timer9_hwmod = {
	.name		= "timer9",
	.main_clk	= "gpt9_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT9_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer10 */
struct omap_hwmod omap2xxx_timer10_hwmod = {
	.name		= "timer10",
	.main_clk	= "gpt10_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT10_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer11 */
struct omap_hwmod omap2xxx_timer11_hwmod = {
	.name		= "timer11",
	.main_clk	= "gpt11_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT11_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* timer12 */
struct omap_hwmod omap2xxx_timer12_hwmod = {
	.name		= "timer12",
	.main_clk	= "gpt12_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPT12_SHIFT,
		},
	},
	.class		= &omap2xxx_timer_hwmod_class,
	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
};

/* wd_timer2 */
struct omap_hwmod omap2xxx_wd_timer2_hwmod = {
	.name		= "wd_timer2",
	.class		= &omap2xxx_wd_timer_hwmod_class,
	.main_clk	= "mpu_wdt_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MPU_WDT_SHIFT,
		},
	},
};

/* UART1 */

struct omap_hwmod omap2xxx_uart1_hwmod = {
	.name	= "uart1",
	.main_clk	= "uart1_fck",
	.flags		= DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_EN_UART1_SHIFT,
		},
	},
	.class		= &omap2_uart_class,
};

/* UART2 */

struct omap_hwmod omap2xxx_uart2_hwmod = {
	.name	= "uart2",
	.main_clk	= "uart2_fck",
	.flags		= DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_EN_UART2_SHIFT,
		},
	},
	.class		= &omap2_uart_class,
};

/* UART3 */

struct omap_hwmod omap2xxx_uart3_hwmod = {
	.name	= "uart3",
	.main_clk	= "uart3_fck",
	.flags		= DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 2,
			.idlest_idle_bit = OMAP24XX_EN_UART3_SHIFT,
		},
	},
	.class		= &omap2_uart_class,
};

/* dss */

static struct omap_hwmod_opt_clk dss_opt_clks[] = {
	/*
	 * The DSS HW needs all DSS clocks enabled during reset. The dss_core
	 * driver does not use these clocks.
	 */
	{ .role = "tv_clk", .clk = "dss_54m_fck" },
	{ .role = "sys_clk", .clk = "dss2_fck" },
};

struct omap_hwmod omap2xxx_dss_core_hwmod = {
	.name		= "dss_core",
	.class		= &omap2_dss_hwmod_class,
	.main_clk	= "dss1_fck", /* instead of dss_fck */
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
		},
	},
	.opt_clks	= dss_opt_clks,
	.opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
	.flags		= HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};

struct omap_hwmod omap2xxx_dss_dispc_hwmod = {
	.name		= "dss_dispc",
	.class		= &omap2_dispc_hwmod_class,
	.main_clk	= "dss1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
		},
	},
	.flags		= HWMOD_NO_IDLEST,
	.dev_attr	= &omap2_3_dss_dispc_dev_attr,
};

static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
	{ .role = "ick", .clk = "dss_ick" },
};

struct omap_hwmod omap2xxx_dss_rfbi_hwmod = {
	.name		= "dss_rfbi",
	.class		= &omap2_rfbi_hwmod_class,
	.main_clk	= "dss1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
		},
	},
	.opt_clks	= dss_rfbi_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(dss_rfbi_opt_clks),
	.flags		= HWMOD_NO_IDLEST,
};

struct omap_hwmod omap2xxx_dss_venc_hwmod = {
	.name		= "dss_venc",
	.class		= &omap2_venc_hwmod_class,
	.main_clk	= "dss_54m_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
		},
	},
	.flags		= HWMOD_NO_IDLEST,
};

/* gpio1 */
struct omap_hwmod omap2xxx_gpio1_hwmod = {
	.name		= "gpio1",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.main_clk	= "gpios_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
		},
	},
	.class		= &omap2xxx_gpio_hwmod_class,
};

/* gpio2 */
struct omap_hwmod omap2xxx_gpio2_hwmod = {
	.name		= "gpio2",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.main_clk	= "gpios_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
		},
	},
	.class		= &omap2xxx_gpio_hwmod_class,
};

/* gpio3 */
struct omap_hwmod omap2xxx_gpio3_hwmod = {
	.name		= "gpio3",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.main_clk	= "gpios_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
		},
	},
	.class		= &omap2xxx_gpio_hwmod_class,
};

/* gpio4 */
struct omap_hwmod omap2xxx_gpio4_hwmod = {
	.name		= "gpio4",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.main_clk	= "gpios_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
		},
	},
	.class		= &omap2xxx_gpio_hwmod_class,
};

/* mcspi1 */
struct omap_hwmod omap2xxx_mcspi1_hwmod = {
	.name		= "mcspi1",
	.main_clk	= "mcspi1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MCSPI1_SHIFT,
		},
	},
	.class		= &omap2xxx_mcspi_class,
};

/* mcspi2 */
struct omap_hwmod omap2xxx_mcspi2_hwmod = {
	.name		= "mcspi2",
	.main_clk	= "mcspi2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MCSPI2_SHIFT,
		},
	},
	.class		= &omap2xxx_mcspi_class,
};

/* gpmc */
struct omap_hwmod omap2xxx_gpmc_hwmod = {
	.name		= "gpmc",
	.class		= &omap2xxx_gpmc_hwmod_class,
	.main_clk	= "gpmc_fck",
	/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
	.flags		= HWMOD_NO_IDLEST | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
	.prcm		= {
		.omap2	= {
			.module_offs = CORE_MOD,
		},
	},
};

/* RNG */

static struct omap_hwmod_class_sysconfig omap2_rng_sysc = {
	.rev_offs	= 0x3c,
	.sysc_offs	= 0x40,
	.syss_offs	= 0x44,
	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
			   SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2_rng_hwmod_class = {
	.name		= "rng",
	.sysc		= &omap2_rng_sysc,
};

struct omap_hwmod omap2xxx_rng_hwmod = {
	.name		= "rng",
	.main_clk	= "l4_ck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 4,
			.idlest_idle_bit = OMAP24XX_ST_RNG_SHIFT,
		},
	},
	/*
	 * XXX The first read from the SYSSTATUS register of the RNG
	 * after the SYSCONFIG SOFTRESET bit is set triggers an
	 * imprecise external abort.  It's unclear why this happens.
	 * Until this is analyzed, skip the IP block reset.
	 */
	.flags		= HWMOD_INIT_NO_RESET,
	.class		= &omap2_rng_hwmod_class,
};

/* SHAM */

static struct omap_hwmod_class_sysconfig omap2_sham_sysc = {
	.rev_offs	= 0x5c,
	.sysc_offs	= 0x60,
	.syss_offs	= 0x64,
	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
			   SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2xxx_sham_class = {
	.name	= "sham",
	.sysc	= &omap2_sham_sysc,
};

struct omap_hwmod omap2xxx_sham_hwmod = {
	.name		= "sham",
	.main_clk	= "l4_ck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 4,
			.idlest_idle_bit = OMAP24XX_ST_SHA_SHIFT,
		},
	},
	.class		= &omap2xxx_sham_class,
};

/* AES */

static struct omap_hwmod_class_sysconfig omap2_aes_sysc = {
	.rev_offs	= 0x44,
	.sysc_offs	= 0x48,
	.syss_offs	= 0x4c,
	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
			   SYSS_HAS_RESET_STATUS),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2xxx_aes_class = {
	.name	= "aes",
	.sysc	= &omap2_aes_sysc,
};

struct omap_hwmod omap2xxx_aes_hwmod = {
	.name		= "aes",
	.main_clk	= "l4_ck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.idlest_reg_id = 4,
			.idlest_idle_bit = OMAP24XX_ST_AES_SHIFT,
		},
	},
	.class		= &omap2xxx_aes_class,
};
linux-master
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP3xxx clockdomains
 *
 * Copyright (C) 2008-2011 Texas Instruments, Inc.
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Paul Walmsley, Jouni Högander
 *
 * This file contains clockdomains and clockdomain wakeup/sleep
 * dependencies for the OMAP3xxx chips.  Some notes:
 *
 * A useful validation rule for struct clockdomain: Any clockdomain
 * referenced by a wkdep_srcs or sleepdep_srcs array must have a
 * dep_bit assigned.  So wkdep_srcs/sleepdep_srcs are really just
 * software-controllable dependencies.  Non-software-controllable
 * dependencies do exist, but they are not encoded below (yet).
 *
 * The overly-specific dep_bit names are due to a bit name collision
 * with CM_FCLKEN_{DSP,IVA2}.  The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
 * value are the same for all powerdomains: 2
 *
 * XXX should dep_bit be a mask, so we can test to see if it is 0 as a
 * sanity check?
 * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
 */

/*
 * To-Do List
 * -> Port the Sleep/Wakeup dependencies for the domains
 *    from the Power domain framework
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "soc.h"
#include "clockdomain.h"
#include "prm2xxx_3xxx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"

/*
 * Clockdomain dependencies for wkdeps/sleepdeps
 *
 * XXX Hardware dependencies (e.g., dependencies that cannot be
 * changed in software) are not included here yet, but should be.
 */

/* OMAP3-specific possible dependencies */

/*
 * 3430ES1 PM_WKDEP_GFX: adds IVA2, removes CORE
 * 3430ES2 PM_WKDEP_SGX: adds IVA2, removes CORE
 */
static struct clkdm_dep gfx_sgx_3xxx_wkdeps[] = {
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep gfx_sgx_am35x_wkdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430: PM_WKDEP_PER: CORE, IVA2, MPU, WKUP */
static struct clkdm_dep per_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep per_am35x_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430ES2: PM_WKDEP_USBHOST: CORE, IVA2, MPU, WKUP */
static struct clkdm_dep usbhost_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep usbhost_am35x_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_MPU: CORE, IVA2, DSS, PER */
static struct clkdm_dep mpu_3xxx_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "per_clkdm" },
	{ NULL },
};

static struct clkdm_dep mpu_am35x_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "per_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_IVA2: CORE, MPU, WKUP, DSS, PER */
static struct clkdm_dep iva2_wkdeps[] = {
	{ .clkdm_name = "core_l3_clkdm" },
	{ .clkdm_name = "core_l4_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ .clkdm_name = "dss_clkdm" },
	{ .clkdm_name = "per_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_CAM: IVA2, MPU, WKUP */
static struct clkdm_dep cam_wkdeps[] = {
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430 PM_WKDEP_DSS: IVA2, MPU, WKUP */
static struct clkdm_dep dss_wkdeps[] = {
	{ .clkdm_name = "iva2_clkdm" },
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep dss_am35x_wkdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "wkup_clkdm" },
	{ NULL },
};

/* 3430: PM_WKDEP_NEON: MPU */
static struct clkdm_dep neon_wkdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* Sleep dependency source arrays for OMAP3-specific clkdms */

/* 3430: CM_SLEEPDEP_DSS: MPU, IVA */
static struct clkdm_dep dss_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ NULL },
};

static struct clkdm_dep dss_am35x_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* 3430: CM_SLEEPDEP_PER: MPU, IVA */
static struct clkdm_dep per_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ NULL },
};

static struct clkdm_dep per_am35x_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* 3430ES2: CM_SLEEPDEP_USBHOST: MPU, IVA */
static struct clkdm_dep usbhost_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ .clkdm_name = "iva2_clkdm" },
	{ NULL },
};

static struct clkdm_dep usbhost_am35x_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/* 3430: CM_SLEEPDEP_CAM: MPU */
static struct clkdm_dep cam_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/*
 * 3430ES1: CM_SLEEPDEP_GFX: MPU
 * 3430ES2: CM_SLEEPDEP_SGX: MPU
 * These can share data since they will never be present simultaneously
 * on the same device.
 */
static struct clkdm_dep gfx_sgx_sleepdeps[] = {
	{ .clkdm_name = "mpu_clkdm" },
	{ NULL },
};

/*
 * OMAP3 clockdomains
 */

static struct clockdomain mpu_3xxx_clkdm = {
	.name		= "mpu_clkdm",
	.pwrdm		= { .name = "mpu_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP | CLKDM_CAN_FORCE_WAKEUP,
	.dep_bit	= OMAP3430_EN_MPU_SHIFT,
	.wkdep_srcs	= mpu_3xxx_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
};

static struct clockdomain mpu_am35x_clkdm = {
	.name		= "mpu_clkdm",
	.pwrdm		= { .name = "mpu_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP | CLKDM_CAN_FORCE_WAKEUP,
	.dep_bit	= OMAP3430_EN_MPU_SHIFT,
	.wkdep_srcs	= mpu_am35x_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
};

static struct clockdomain neon_clkdm = {
	.name		= "neon_clkdm",
	.pwrdm		= { .name = "neon_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= neon_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_NEON_MASK,
};

static struct clockdomain iva2_clkdm = {
	.name		= "iva2_clkdm",
	.pwrdm		= { .name = "iva2_pwrdm" },
	.flags		= CLKDM_CAN_SWSUP,
	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_IVA2_SHIFT,
	.wkdep_srcs	= iva2_wkdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_IVA2_MASK,
};

static struct clockdomain gfx_3430es1_clkdm = {
	.name		= "gfx_clkdm",
	.pwrdm		= { .name = "gfx_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= gfx_sgx_3xxx_wkdeps,
	.sleepdep_srcs	= gfx_sgx_sleepdeps,
	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_GFX_MASK,
};

static struct clockdomain sgx_clkdm = {
	.name		= "sgx_clkdm",
	.pwrdm		= { .name = "sgx_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= gfx_sgx_3xxx_wkdeps,
	.sleepdep_srcs	= gfx_sgx_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
};

static struct clockdomain sgx_am35x_clkdm = {
	.name		= "sgx_clkdm",
	.pwrdm		= { .name = "sgx_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= gfx_sgx_am35x_wkdeps,
	.sleepdep_srcs	= gfx_sgx_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
};

/*
 * The die-to-die clockdomain was documented in the 34xx ES1 TRM, but
 * then that information was removed from the 34xx ES2+ TRM.  It is
 * unclear whether the core is still there, but the clockdomain logic
 * is there, and must be programmed to an appropriate state if the
 * CORE clockdomain is to become inactive.
 */
static struct clockdomain d2d_clkdm = {
	.name		= "d2d_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_D2D_MASK,
};

/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l3_3xxx_clkdm = {
	.name		= "core_l3_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP,
	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L3_MASK,
};

/*
 * XXX add usecounting for clkdm dependencies, otherwise the presence
 * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
 * could cause trouble
 */
static struct clockdomain core_l4_3xxx_clkdm = {
	.name		= "core_l4_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP,
	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L4_MASK,
};

/* Another case of bit name collisions between several registers: EN_DSS */
static struct clockdomain dss_3xxx_clkdm = {
	.name		= "dss_clkdm",
	.pwrdm		= { .name = "dss_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT,
	.wkdep_srcs	= dss_wkdeps,
	.sleepdep_srcs	= dss_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
};

static struct clockdomain dss_am35x_clkdm = {
	.name		= "dss_clkdm",
	.pwrdm		= { .name = "dss_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT,
	.wkdep_srcs	= dss_am35x_wkdeps,
	.sleepdep_srcs	= dss_am35x_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
};

static struct clockdomain cam_clkdm = {
	.name		= "cam_clkdm",
	.pwrdm		= { .name = "cam_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= cam_wkdeps,
	.sleepdep_srcs	= cam_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_CAM_MASK,
};

static struct clockdomain usbhost_clkdm = {
	.name		= "usbhost_clkdm",
	.pwrdm		= { .name = "usbhost_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= usbhost_wkdeps,
	.sleepdep_srcs	= usbhost_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
};

static struct clockdomain usbhost_am35x_clkdm = {
	.name		= "usbhost_clkdm",
	.pwrdm		= { .name = "core_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.wkdep_srcs	= usbhost_am35x_wkdeps,
	.sleepdep_srcs	= usbhost_am35x_sleepdeps,
	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
};

static struct clockdomain per_clkdm = {
	.name		= "per_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_EN_PER_SHIFT,
	.wkdep_srcs	= per_wkdeps,
	.sleepdep_srcs	= per_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
};

static struct clockdomain per_am35x_clkdm = {
	.name		= "per_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.flags		= CLKDM_CAN_HWSUP_SWSUP,
	.dep_bit	= OMAP3430_EN_PER_SHIFT,
	.wkdep_srcs	= per_am35x_wkdeps,
	.sleepdep_srcs	= per_am35x_sleepdeps,
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
};

static struct clockdomain emu_clkdm = {
	.name		= "emu_clkdm",
	.pwrdm		= { .name = "emu_pwrdm" },
	.flags		= (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_SWSUP |
			   CLKDM_MISSING_IDLE_REPORTING),
	.clktrctrl_mask = OMAP3430_CLKTRCTRL_EMU_MASK,
};

static struct clockdomain dpll1_clkdm = {
	.name		= "dpll1_clkdm",
	.pwrdm		= { .name = "dpll1_pwrdm" },
};

static struct clockdomain dpll2_clkdm = {
	.name		= "dpll2_clkdm",
	.pwrdm		= { .name = "dpll2_pwrdm" },
};

static struct clockdomain dpll3_clkdm = {
	.name		= "dpll3_clkdm",
	.pwrdm		= { .name = "dpll3_pwrdm" },
};

static struct clockdomain dpll4_clkdm = {
	.name		= "dpll4_clkdm",
	.pwrdm		= { .name = "dpll4_pwrdm" },
};

static struct clockdomain dpll5_clkdm = {
	.name		= "dpll5_clkdm",
	.pwrdm		= { .name = "dpll5_pwrdm" },
};

/*
 * Clockdomain hwsup dependencies
 */

static struct clkdm_autodep clkdm_autodeps[] = {
	{ .clkdm = { .name
= "mpu_clkdm" }, }, { .clkdm = { .name = "iva2_clkdm" }, }, { .clkdm = { .name = NULL }, } }; static struct clkdm_autodep clkdm_am35x_autodeps[] = { { .clkdm = { .name = "mpu_clkdm" }, }, { .clkdm = { .name = NULL }, } }; /* * */ static struct clockdomain *clockdomains_common[] __initdata = { &wkup_common_clkdm, &neon_clkdm, &core_l3_3xxx_clkdm, &core_l4_3xxx_clkdm, &emu_clkdm, &dpll1_clkdm, &dpll3_clkdm, &dpll4_clkdm, NULL }; static struct clockdomain *clockdomains_omap3430[] __initdata = { &mpu_3xxx_clkdm, &iva2_clkdm, &d2d_clkdm, &dss_3xxx_clkdm, &cam_clkdm, &per_clkdm, &dpll2_clkdm, NULL }; static struct clockdomain *clockdomains_omap3430es1[] __initdata = { &gfx_3430es1_clkdm, NULL, }; static struct clockdomain *clockdomains_omap3430es2plus[] __initdata = { &sgx_clkdm, &dpll5_clkdm, &usbhost_clkdm, NULL, }; static struct clockdomain *clockdomains_am35x[] __initdata = { &mpu_am35x_clkdm, &sgx_am35x_clkdm, &dss_am35x_clkdm, &per_am35x_clkdm, &usbhost_am35x_clkdm, &dpll5_clkdm, NULL }; void __init omap3xxx_clockdomains_init(void) { struct clockdomain **sc; unsigned int rev; if (!cpu_is_omap34xx()) return; clkdm_register_platform_funcs(&omap3_clkdm_operations); clkdm_register_clkdms(clockdomains_common); rev = omap_rev(); if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) { clkdm_register_clkdms(clockdomains_am35x); clkdm_register_autodeps(clkdm_am35x_autodeps); } else { clkdm_register_clkdms(clockdomains_omap3430); sc = (rev == OMAP3430_REV_ES1_0) ? clockdomains_omap3430es1 : clockdomains_omap3430es2plus; clkdm_register_clkdms(sc); clkdm_register_autodeps(clkdm_autodeps); } clkdm_complete_init(); }
linux-master
arch/arm/mach-omap2/clockdomains3xxx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP4 CM instance functions * * Copyright (C) 2009 Nokia Corporation * Copyright (C) 2008-2011 Texas Instruments, Inc. * Paul Walmsley * Rajendra Nayak <[email protected]> * * This is needed since CM instances can be in the PRM, PRCM_MPU, CM1, * or CM2 hardware modules. For example, the EMU_CM CM instance is in * the PRM hardware module. What a mess... */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "clockdomain.h" #include "cm.h" #include "cm1_44xx.h" #include "cm2_44xx.h" #include "cm44xx.h" #include "cm-regbits-34xx.h" #include "prcm44xx.h" #include "prm44xx.h" #include "prcm_mpu44xx.h" #include "prcm-common.h" #define OMAP4430_IDLEST_SHIFT 16 #define OMAP4430_IDLEST_MASK (0x3 << 16) #define OMAP4430_CLKTRCTRL_SHIFT 0 #define OMAP4430_CLKTRCTRL_MASK (0x3 << 0) #define OMAP4430_MODULEMODE_SHIFT 0 #define OMAP4430_MODULEMODE_MASK (0x3 << 0) /* * CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield: * * 0x0 func: Module is fully functional, including OCP * 0x1 trans: Module is performing transition: wakeup, or sleep, or sleep * abortion * 0x2 idle: Module is in Idle mode (only OCP part). It is functional if * using separate functional clock * 0x3 disabled: Module is disabled and cannot be accessed * */ #define CLKCTRL_IDLEST_FUNCTIONAL 0x0 #define CLKCTRL_IDLEST_INTRANSITION 0x1 #define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2 #define CLKCTRL_IDLEST_DISABLED 0x3 static struct omap_domain_base _cm_bases[OMAP4_MAX_PRCM_PARTITIONS]; /** * omap_cm_base_init - Populates the cm partitions * * Populates the base addresses of the _cm_bases * array used for read/write of cm module registers. 
*/ static void omap_cm_base_init(void) { memcpy(&_cm_bases[OMAP4430_PRM_PARTITION], &prm_base, sizeof(prm_base)); memcpy(&_cm_bases[OMAP4430_CM1_PARTITION], &cm_base, sizeof(cm_base)); memcpy(&_cm_bases[OMAP4430_CM2_PARTITION], &cm2_base, sizeof(cm2_base)); memcpy(&_cm_bases[OMAP4430_PRCM_MPU_PARTITION], &prcm_mpu_base, sizeof(prcm_mpu_base)); } /* Private functions */ static u32 omap4_cminst_read_inst_reg(u8 part, u16 inst, u16 idx); /** * _clkctrl_idlest - read a CM_*_CLKCTRL register; mask & shift IDLEST bitfield * @part: PRCM partition ID that the CM_CLKCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Return the IDLEST bitfield of a CM_*_CLKCTRL register, shifted down to * bit 0. */ static u32 _clkctrl_idlest(u8 part, u16 inst, u16 clkctrl_offs) { u32 v = omap4_cminst_read_inst_reg(part, inst, clkctrl_offs); v &= OMAP4430_IDLEST_MASK; v >>= OMAP4430_IDLEST_SHIFT; return v; } /** * _is_module_ready - can module registers be accessed without causing an abort? * @part: PRCM partition ID that the CM_CLKCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * Returns true if the module's CM_*_CLKCTRL.IDLEST bitfield is either * *FUNCTIONAL or *INTERFACE_IDLE; false otherwise. */ static bool _is_module_ready(u8 part, u16 inst, u16 clkctrl_offs) { u32 v; v = _clkctrl_idlest(part, inst, clkctrl_offs); return (v == CLKCTRL_IDLEST_FUNCTIONAL || v == CLKCTRL_IDLEST_INTERFACE_IDLE) ? 
true : false; } /* Read a register in a CM instance */ static u32 omap4_cminst_read_inst_reg(u8 part, u16 inst, u16 idx) { BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS || part == OMAP4430_INVALID_PRCM_PARTITION || !_cm_bases[part].va); return readl_relaxed(_cm_bases[part].va + inst + idx); } /* Write into a register in a CM instance */ static void omap4_cminst_write_inst_reg(u32 val, u8 part, u16 inst, u16 idx) { BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS || part == OMAP4430_INVALID_PRCM_PARTITION || !_cm_bases[part].va); writel_relaxed(val, _cm_bases[part].va + inst + idx); } /* Read-modify-write a register in CM1. Caller must lock */ static u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, u16 inst, s16 idx) { u32 v; v = omap4_cminst_read_inst_reg(part, inst, idx); v &= ~mask; v |= bits; omap4_cminst_write_inst_reg(v, part, inst, idx); return v; } static u32 omap4_cminst_set_inst_reg_bits(u32 bits, u8 part, u16 inst, s16 idx) { return omap4_cminst_rmw_inst_reg_bits(bits, bits, part, inst, idx); } static u32 omap4_cminst_clear_inst_reg_bits(u32 bits, u8 part, u16 inst, s16 idx) { return omap4_cminst_rmw_inst_reg_bits(bits, 0x0, part, inst, idx); } static u32 omap4_cminst_read_inst_reg_bits(u8 part, u16 inst, s16 idx, u32 mask) { u32 v; v = omap4_cminst_read_inst_reg(part, inst, idx); v &= mask; v >>= __ffs(mask); return v; } /* * */ /** * _clktrctrl_write - write @c to a CM_CLKSTCTRL.CLKTRCTRL register bitfield * @c: CLKTRCTRL register bitfield (LSB = bit 0, i.e., unshifted) * @part: PRCM partition ID that the CM_CLKSTCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * @c must be the unshifted value for CLKTRCTRL - i.e., this function * will handle the shift itself. 
*/ static void _clktrctrl_write(u8 c, u8 part, u16 inst, u16 cdoffs) { u32 v; v = omap4_cminst_read_inst_reg(part, inst, cdoffs + OMAP4_CM_CLKSTCTRL); v &= ~OMAP4430_CLKTRCTRL_MASK; v |= c << OMAP4430_CLKTRCTRL_SHIFT; omap4_cminst_write_inst_reg(v, part, inst, cdoffs + OMAP4_CM_CLKSTCTRL); } /** * omap4_cminst_is_clkdm_in_hwsup - is a clockdomain in hwsup idle mode? * @part: PRCM partition ID that the CM_CLKSTCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Returns true if the clockdomain referred to by (@part, @inst, @cdoffs) * is in hardware-supervised idle mode, or 0 otherwise. */ static bool omap4_cminst_is_clkdm_in_hwsup(u8 part, u16 inst, u16 cdoffs) { u32 v; v = omap4_cminst_read_inst_reg(part, inst, cdoffs + OMAP4_CM_CLKSTCTRL); v &= OMAP4430_CLKTRCTRL_MASK; v >>= OMAP4430_CLKTRCTRL_SHIFT; return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? true : false; } /** * omap4_cminst_clkdm_enable_hwsup - put a clockdomain in hwsup-idle mode * @part: PRCM partition ID that the clockdomain registers exist in * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@part, @inst, @cdoffs) into * hardware-supervised idle mode. No return value. */ static void omap4_cminst_clkdm_enable_hwsup(u8 part, u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, part, inst, cdoffs); } /** * omap4_cminst_clkdm_disable_hwsup - put a clockdomain in swsup-idle mode * @part: PRCM partition ID that the clockdomain registers exist in * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Put a clockdomain referred to by (@part, @inst, @cdoffs) into * software-supervised idle mode, i.e., controlled manually by the * Linux OMAP clockdomain code. No return value. 
*/ static void omap4_cminst_clkdm_disable_hwsup(u8 part, u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, part, inst, cdoffs); } /** * omap4_cminst_clkdm_force_sleep - try to take a clockdomain out of idle * @part: PRCM partition ID that the clockdomain registers exist in * @inst: CM instance register offset (*_INST macro) * @cdoffs: Clockdomain register offset (*_CDOFFS macro) * * Take a clockdomain referred to by (@part, @inst, @cdoffs) out of idle, * waking it up. No return value. */ static void omap4_cminst_clkdm_force_wakeup(u8 part, u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, part, inst, cdoffs); } /* * */ static void omap4_cminst_clkdm_force_sleep(u8 part, u16 inst, u16 cdoffs) { _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, part, inst, cdoffs); } /** * omap4_cminst_wait_module_ready - wait for a module to be in 'func' state * @part: PRCM partition ID that the CM_CLKCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * @bit_shift: bit shift for the register, ignored for OMAP4+ * * Wait for the module IDLEST to be functional. If the idle state is in any * the non functional state (trans, idle or disabled), module and thus the * sysconfig cannot be accessed and will probably lead to an "imprecise * external abort" */ static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs, u8 bit_shift) { int i = 0; omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs), MAX_MODULE_READY_TIME, i); return (i < MAX_MODULE_READY_TIME) ? 
0 : -EBUSY; } /** * omap4_cminst_wait_module_idle - wait for a module to be in 'disabled' * state * @part: PRCM partition ID that the CM_CLKCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * @bit_shift: Bit shift for the register, ignored for OMAP4+ * * Wait for the module IDLEST to be disabled. Some PRCM transition, * like reset assertion or parent clock de-activation must wait the * module to be fully disabled. */ static int omap4_cminst_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs, u8 bit_shift) { int i = 0; omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) == CLKCTRL_IDLEST_DISABLED), MAX_MODULE_DISABLE_TIME, i); return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY; } /** * omap4_cminst_module_enable - Enable the modulemode inside CLKCTRL * @mode: Module mode (SW or HW) * @part: PRCM partition ID that the CM_CLKCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * No return value. */ static void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, u16 clkctrl_offs) { u32 v; v = omap4_cminst_read_inst_reg(part, inst, clkctrl_offs); v &= ~OMAP4430_MODULEMODE_MASK; v |= mode << OMAP4430_MODULEMODE_SHIFT; omap4_cminst_write_inst_reg(v, part, inst, clkctrl_offs); } /** * omap4_cminst_module_disable - Disable the module inside CLKCTRL * @part: PRCM partition ID that the CM_CLKCTRL register exists in * @inst: CM instance register offset (*_INST macro) * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro) * * No return value. 
*/ static void omap4_cminst_module_disable(u8 part, u16 inst, u16 clkctrl_offs) { u32 v; v = omap4_cminst_read_inst_reg(part, inst, clkctrl_offs); v &= ~OMAP4430_MODULEMODE_MASK; omap4_cminst_write_inst_reg(v, part, inst, clkctrl_offs); } /* * Clockdomain low-level functions */ static int omap4_clkdm_add_wkup_sleep_dep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { omap4_cminst_set_inst_reg_bits((1 << clkdm2->dep_bit), clkdm1->prcm_partition, clkdm1->cm_inst, clkdm1->clkdm_offs + OMAP4_CM_STATICDEP); return 0; } static int omap4_clkdm_del_wkup_sleep_dep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { omap4_cminst_clear_inst_reg_bits((1 << clkdm2->dep_bit), clkdm1->prcm_partition, clkdm1->cm_inst, clkdm1->clkdm_offs + OMAP4_CM_STATICDEP); return 0; } static int omap4_clkdm_read_wkup_sleep_dep(struct clockdomain *clkdm1, struct clockdomain *clkdm2) { return omap4_cminst_read_inst_reg_bits(clkdm1->prcm_partition, clkdm1->cm_inst, clkdm1->clkdm_offs + OMAP4_CM_STATICDEP, (1 << clkdm2->dep_bit)); } static int omap4_clkdm_clear_all_wkup_sleep_deps(struct clockdomain *clkdm) { struct clkdm_dep *cd; u32 mask = 0; if (!clkdm->prcm_partition) return 0; for (cd = clkdm->wkdep_srcs; cd && cd->clkdm_name; cd++) { if (!cd->clkdm) continue; /* only happens if data is erroneous */ mask |= 1 << cd->clkdm->dep_bit; cd->wkdep_usecount = 0; } omap4_cminst_clear_inst_reg_bits(mask, clkdm->prcm_partition, clkdm->cm_inst, clkdm->clkdm_offs + OMAP4_CM_STATICDEP); return 0; } static int omap4_clkdm_sleep(struct clockdomain *clkdm) { if (clkdm->flags & CLKDM_CAN_HWSUP) omap4_cminst_clkdm_enable_hwsup(clkdm->prcm_partition, clkdm->cm_inst, clkdm->clkdm_offs); else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP) omap4_cminst_clkdm_force_sleep(clkdm->prcm_partition, clkdm->cm_inst, clkdm->clkdm_offs); else return -EINVAL; return 0; } static int omap4_clkdm_wakeup(struct clockdomain *clkdm) { omap4_cminst_clkdm_force_wakeup(clkdm->prcm_partition, clkdm->cm_inst, 
clkdm->clkdm_offs); return 0; } static void omap4_clkdm_allow_idle(struct clockdomain *clkdm) { omap4_cminst_clkdm_enable_hwsup(clkdm->prcm_partition, clkdm->cm_inst, clkdm->clkdm_offs); } static void omap4_clkdm_deny_idle(struct clockdomain *clkdm) { if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) omap4_clkdm_wakeup(clkdm); else omap4_cminst_clkdm_disable_hwsup(clkdm->prcm_partition, clkdm->cm_inst, clkdm->clkdm_offs); } static int omap4_clkdm_clk_enable(struct clockdomain *clkdm) { if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) return omap4_clkdm_wakeup(clkdm); return 0; } static int omap4_clkdm_clk_disable(struct clockdomain *clkdm) { bool hwsup = false; if (!clkdm->prcm_partition) return 0; /* * The CLKDM_MISSING_IDLE_REPORTING flag documentation has * more details on the unpleasant problem this is working * around */ if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING && !(clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) { omap4_clkdm_allow_idle(clkdm); return 0; } hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition, clkdm->cm_inst, clkdm->clkdm_offs); if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) omap4_clkdm_sleep(clkdm); return 0; } static u32 omap4_cminst_xlate_clkctrl(u8 part, u16 inst, u16 offset) { return _cm_bases[part].pa + inst + offset; } /** * omap4_clkdm_save_context - Save the clockdomain modulemode context * @clkdm: The clockdomain pointer whose context needs to be saved * * Save the clockdomain modulemode context. */ static int omap4_clkdm_save_context(struct clockdomain *clkdm) { clkdm->context = omap4_cminst_read_inst_reg(clkdm->prcm_partition, clkdm->cm_inst, clkdm->clkdm_offs + OMAP4_CM_CLKSTCTRL); clkdm->context &= OMAP4430_MODULEMODE_MASK; return 0; } /** * omap4_clkdm_restore_context - Restore the clockdomain modulemode context * @clkdm: The clockdomain pointer whose context needs to be restored * * Restore the clockdomain modulemode context. 
*/ static int omap4_clkdm_restore_context(struct clockdomain *clkdm) { switch (clkdm->context) { case OMAP34XX_CLKSTCTRL_DISABLE_AUTO: omap4_clkdm_deny_idle(clkdm); break; case OMAP34XX_CLKSTCTRL_FORCE_SLEEP: omap4_clkdm_sleep(clkdm); break; case OMAP34XX_CLKSTCTRL_FORCE_WAKEUP: omap4_clkdm_wakeup(clkdm); break; case OMAP34XX_CLKSTCTRL_ENABLE_AUTO: omap4_clkdm_allow_idle(clkdm); break; } return 0; } struct clkdm_ops omap4_clkdm_operations = { .clkdm_add_wkdep = omap4_clkdm_add_wkup_sleep_dep, .clkdm_del_wkdep = omap4_clkdm_del_wkup_sleep_dep, .clkdm_read_wkdep = omap4_clkdm_read_wkup_sleep_dep, .clkdm_clear_all_wkdeps = omap4_clkdm_clear_all_wkup_sleep_deps, .clkdm_add_sleepdep = omap4_clkdm_add_wkup_sleep_dep, .clkdm_del_sleepdep = omap4_clkdm_del_wkup_sleep_dep, .clkdm_read_sleepdep = omap4_clkdm_read_wkup_sleep_dep, .clkdm_clear_all_sleepdeps = omap4_clkdm_clear_all_wkup_sleep_deps, .clkdm_sleep = omap4_clkdm_sleep, .clkdm_wakeup = omap4_clkdm_wakeup, .clkdm_allow_idle = omap4_clkdm_allow_idle, .clkdm_deny_idle = omap4_clkdm_deny_idle, .clkdm_clk_enable = omap4_clkdm_clk_enable, .clkdm_clk_disable = omap4_clkdm_clk_disable, .clkdm_save_context = omap4_clkdm_save_context, .clkdm_restore_context = omap4_clkdm_restore_context, }; struct clkdm_ops am43xx_clkdm_operations = { .clkdm_sleep = omap4_clkdm_sleep, .clkdm_wakeup = omap4_clkdm_wakeup, .clkdm_allow_idle = omap4_clkdm_allow_idle, .clkdm_deny_idle = omap4_clkdm_deny_idle, .clkdm_clk_enable = omap4_clkdm_clk_enable, .clkdm_clk_disable = omap4_clkdm_clk_disable, }; static const struct cm_ll_data omap4xxx_cm_ll_data = { .wait_module_ready = &omap4_cminst_wait_module_ready, .wait_module_idle = &omap4_cminst_wait_module_idle, .module_enable = &omap4_cminst_module_enable, .module_disable = &omap4_cminst_module_disable, .xlate_clkctrl = &omap4_cminst_xlate_clkctrl, }; int __init omap4_cm_init(const struct omap_prcm_init_data *data) { omap_cm_base_init(); return cm_register(&omap4xxx_cm_ll_data); } static void __exit 
omap4_cm_exit(void) { cm_unregister(&omap4xxx_cm_ll_data); } __exitcall(omap4_cm_exit);
linux-master
arch/arm/mach-omap2/cminst44xx.c
// SPDX-License-Identifier: GPL-2.0 /* * opp2420_data.c - old-style "OPP" table for OMAP2420 * * Copyright (C) 2005-2009 Texas Instruments, Inc. * Copyright (C) 2004-2009 Nokia Corporation * * Richard Woodruff <[email protected]> * * The OMAP2 processor can be run at several discrete 'PRCM configurations'. * These configurations are characterized by voltage and speed for clocks. * The device is only validated for certain combinations. One way to express * these combinations is via the 'ratios' which the clocks operate with * respect to each other. These ratio sets are for a given voltage/DPLL * setting. All configurations can be described by a DPLL setting and a ratio. * * XXX Missing voltage data. * XXX Missing 19.2MHz sys_clk rate sets (needed for N800/N810) * * THe format described in this file is deprecated. Once a reasonable * OPP API exists, the data in this file should be converted to use it. * * This is technically part of the OMAP2xxx clock code. * * Considerable work is still needed to fully support dynamic frequency * changes on OMAP2xxx-series chips. Readers interested in such a * project are encouraged to review the Maemo Diablo RX-34 and RX-44 * kernel source at: * http://repository.maemo.org/pool/diablo/free/k/kernel-source-diablo/ */ #include <linux/kernel.h> #include "opp2xxx.h" #include "sdrc.h" #include "clock.h" /* * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated. * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU, * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL, * CM_CLKSEL2_PLL, CM_CLKSEL_MDM * * Filling in table based on H4 boards available. There are quite a * few more rate combinations which could be defined. * * When multiple values are defined the start up will try and choose * the fastest one. If a 'fast' value is defined, then automatically, * the /2 one should be included as it can be used. 
Generally having * more than one fast set does not make sense, as static timings need * to be changed to change the set. The exception is the bypass * setting which is available for low power bypass. * * Note: This table needs to be sorted, fastest to slowest. **/ const struct prcm_config omap2420_rate_table[] = { /* PRCM I - FAST */ {S12M, S660M, S330M, RI_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */ RI_CM_CLKSEL_DSP_VAL, RI_CM_CLKSEL_GFX_VAL, RI_CM_CLKSEL1_CORE_VAL, MI_CM_CLKSEL1_PLL_12_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_165MHz, RATE_IN_242X}, /* PRCM II - FAST */ {S12M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL, RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz, RATE_IN_242X}, {S13M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL, RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz, RATE_IN_242X}, /* PRCM III - FAST */ {S12M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL, RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz, RATE_IN_242X}, {S13M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL, RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz, RATE_IN_242X}, /* PRCM II - SLOW */ {S12M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL, RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz, RATE_IN_242X}, {S13M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL, RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz, RATE_IN_242X}, /* PRCM III - SLOW */ {S12M, S266M, 
S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL, RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz, RATE_IN_242X}, {S13M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL, RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz, RATE_IN_242X}, /* PRCM-VII (boot-bypass) */ {S12M, S12M, S12M, RVII_CM_CLKSEL_MPU_VAL, /* 12MHz ARM*/ RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL, RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_12_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS, RATE_IN_242X}, /* PRCM-VII (boot-bypass) */ {S13M, S13M, S13M, RVII_CM_CLKSEL_MPU_VAL, /* 13MHz ARM */ RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL, RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_13_VAL, MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS, RATE_IN_242X}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };
linux-master
arch/arm/mach-omap2/opp2420_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4 Clock domains framework
 *
 * Copyright (C) 2009-2011 Texas Instruments, Inc.
 * Copyright (C) 2009-2011 Nokia Corporation
 *
 * Abhijit Pagare ([email protected])
 * Benoit Cousson ([email protected])
 * Paul Walmsley ([email protected])
 *
 * This file is automatically generated from the OMAP hardware databases.
 * We respectfully ask that any modifications to this file be coordinated
 * with the public [email protected] mailing list and the
 * authors above to ensure that the autogeneration scripts are kept
 * up-to-date with the file contents.
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "clockdomain.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
#include "cm-regbits-44xx.h"
#include "prm44xx.h"
#include "prcm44xx.h"
#include "prcm_mpu44xx.h"

/*
 * Static Dependencies for OMAP4 Clock Domains
 *
 * Each *_wkup_sleep_deps[] array lists, by name, the clockdomains that a
 * given domain has static wakeup/sleep dependencies on; each array is
 * NULL-terminated and is referenced below via .wkdep_srcs/.sleepdep_srcs.
 */
static struct clkdm_dep d2d_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" }, { .clkdm_name = "ivahd_clkdm" },
	{ .clkdm_name = "l3_1_clkdm" }, { .clkdm_name = "l3_2_clkdm" },
	{ .clkdm_name = "l3_emif_clkdm" }, { .clkdm_name = "l3_init_clkdm" },
	{ .clkdm_name = "l4_cfg_clkdm" }, { .clkdm_name = "l4_per_clkdm" },
	{ NULL },
};

static struct clkdm_dep ducati_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" }, { .clkdm_name = "ivahd_clkdm" },
	{ .clkdm_name = "l3_1_clkdm" }, { .clkdm_name = "l3_2_clkdm" },
	{ .clkdm_name = "l3_dss_clkdm" }, { .clkdm_name = "l3_emif_clkdm" },
	{ .clkdm_name = "l3_gfx_clkdm" }, { .clkdm_name = "l3_init_clkdm" },
	{ .clkdm_name = "l4_cfg_clkdm" }, { .clkdm_name = "l4_per_clkdm" },
	{ .clkdm_name = "l4_secure_clkdm" }, { .clkdm_name = "l4_wkup_clkdm" },
	{ .clkdm_name = "tesla_clkdm" },
	{ NULL },
};

static struct clkdm_dep iss_wkup_sleep_deps[] = {
	{ .clkdm_name = "ivahd_clkdm" }, { .clkdm_name = "l3_1_clkdm" },
	{ .clkdm_name = "l3_emif_clkdm" },
	{ NULL },
};

static struct clkdm_dep ivahd_wkup_sleep_deps[] = {
	{ .clkdm_name = "l3_1_clkdm" }, { .clkdm_name = "l3_emif_clkdm" },
	{ NULL },
};

static struct clkdm_dep l3_dma_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" }, { .clkdm_name = "ducati_clkdm" },
	{ .clkdm_name = "ivahd_clkdm" }, { .clkdm_name = "l3_1_clkdm" },
	{ .clkdm_name = "l3_dss_clkdm" }, { .clkdm_name = "l3_emif_clkdm" },
	{ .clkdm_name = "l3_init_clkdm" }, { .clkdm_name = "l4_cfg_clkdm" },
	{ .clkdm_name = "l4_per_clkdm" }, { .clkdm_name = "l4_secure_clkdm" },
	{ .clkdm_name = "l4_wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep l3_dss_wkup_sleep_deps[] = {
	{ .clkdm_name = "ivahd_clkdm" }, { .clkdm_name = "l3_2_clkdm" },
	{ .clkdm_name = "l3_emif_clkdm" },
	{ NULL },
};

static struct clkdm_dep l3_gfx_wkup_sleep_deps[] = {
	{ .clkdm_name = "ivahd_clkdm" }, { .clkdm_name = "l3_1_clkdm" },
	{ .clkdm_name = "l3_emif_clkdm" },
	{ NULL },
};

static struct clkdm_dep l3_init_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" }, { .clkdm_name = "ivahd_clkdm" },
	{ .clkdm_name = "l3_emif_clkdm" }, { .clkdm_name = "l4_cfg_clkdm" },
	{ .clkdm_name = "l4_per_clkdm" }, { .clkdm_name = "l4_secure_clkdm" },
	{ .clkdm_name = "l4_wkup_clkdm" },
	{ NULL },
};

static struct clkdm_dep l4_secure_wkup_sleep_deps[] = {
	{ .clkdm_name = "l3_1_clkdm" }, { .clkdm_name = "l3_emif_clkdm" },
	{ .clkdm_name = "l4_per_clkdm" },
	{ NULL },
};

static struct clkdm_dep mpu_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" }, { .clkdm_name = "ducati_clkdm" },
	{ .clkdm_name = "ivahd_clkdm" }, { .clkdm_name = "l3_1_clkdm" },
	{ .clkdm_name = "l3_2_clkdm" }, { .clkdm_name = "l3_dss_clkdm" },
	{ .clkdm_name = "l3_emif_clkdm" }, { .clkdm_name = "l3_gfx_clkdm" },
	{ .clkdm_name = "l3_init_clkdm" }, { .clkdm_name = "l4_cfg_clkdm" },
	{ .clkdm_name = "l4_per_clkdm" }, { .clkdm_name = "l4_secure_clkdm" },
	{ .clkdm_name = "l4_wkup_clkdm" }, { .clkdm_name = "tesla_clkdm" },
	{ NULL },
};

static struct clkdm_dep tesla_wkup_sleep_deps[] = {
	{ .clkdm_name = "abe_clkdm" }, { .clkdm_name = "ivahd_clkdm" },
	{ .clkdm_name = "l3_1_clkdm" }, { .clkdm_name = "l3_2_clkdm" },
	{ .clkdm_name = "l3_emif_clkdm" }, { .clkdm_name = "l3_init_clkdm" },
	{ .clkdm_name = "l4_cfg_clkdm" }, { .clkdm_name = "l4_per_clkdm" },
	{ .clkdm_name = "l4_wkup_clkdm" },
	{ NULL },
};

/*
 * Clockdomain records: each entry maps a clockdomain name to its
 * powerdomain, PRCM partition/instance, register offset, optional
 * static-dependency bit and sources, and idle/wakeup capability flags.
 */
static struct clockdomain l4_cefuse_44xx_clkdm = {
	.name		  = "l4_cefuse_clkdm",
	.pwrdm		  = { .name = "cefuse_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CEFUSE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CEFUSE_CEFUSE_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l4_cfg_44xx_clkdm = {
	.name		  = "l4_cfg_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_L4CFG_CDOFFS,
	.dep_bit	  = OMAP4430_L4CFG_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP,
};

static struct clockdomain tesla_44xx_clkdm = {
	.name		  = "tesla_clkdm",
	.pwrdm		  = { .name = "tesla_pwrdm" },
	.prcm_partition	  = OMAP4430_CM1_PARTITION,
	.cm_inst	  = OMAP4430_CM1_TESLA_INST,
	.clkdm_offs	  = OMAP4430_CM1_TESLA_TESLA_CDOFFS,
	.dep_bit	  = OMAP4430_TESLA_STATDEP_SHIFT,
	.wkdep_srcs	  = tesla_wkup_sleep_deps,
	.sleepdep_srcs	  = tesla_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain l3_gfx_44xx_clkdm = {
	.name		  = "l3_gfx_clkdm",
	.pwrdm		  = { .name = "gfx_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_GFX_INST,
	.clkdm_offs	  = OMAP4430_CM2_GFX_GFX_CDOFFS,
	.dep_bit	  = OMAP4430_GFX_STATDEP_SHIFT,
	.wkdep_srcs	  = l3_gfx_wkup_sleep_deps,
	.sleepdep_srcs	  = l3_gfx_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain ivahd_44xx_clkdm = {
	.name		  = "ivahd_clkdm",
	.pwrdm		  = { .name = "ivahd_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_IVAHD_INST,
	.clkdm_offs	  = OMAP4430_CM2_IVAHD_IVAHD_CDOFFS,
	.dep_bit	  = OMAP4430_IVAHD_STATDEP_SHIFT,
	.wkdep_srcs	  = ivahd_wkup_sleep_deps,
	.sleepdep_srcs	  = ivahd_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain l4_secure_44xx_clkdm = {
	.name		  = "l4_secure_clkdm",
	.pwrdm		  = { .name = "l4per_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_L4PER_INST,
	.clkdm_offs	  = OMAP4430_CM2_L4PER_L4SEC_CDOFFS,
	.dep_bit	  = OMAP4430_L4SEC_STATDEP_SHIFT,
	.wkdep_srcs	  = l4_secure_wkup_sleep_deps,
	.sleepdep_srcs	  = l4_secure_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_SWSUP,
};

static struct clockdomain l4_per_44xx_clkdm = {
	.name		  = "l4_per_clkdm",
	.pwrdm		  = { .name = "l4per_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_L4PER_INST,
	.clkdm_offs	  = OMAP4430_CM2_L4PER_L4PER_CDOFFS,
	.dep_bit	  = OMAP4430_L4PER_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain abe_44xx_clkdm = {
	.name		  = "abe_clkdm",
	.pwrdm		  = { .name = "abe_pwrdm" },
	.prcm_partition	  = OMAP4430_CM1_PARTITION,
	.cm_inst	  = OMAP4430_CM1_ABE_INST,
	.clkdm_offs	  = OMAP4430_CM1_ABE_ABE_CDOFFS,
	.dep_bit	  = OMAP4430_ABE_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain l3_instr_44xx_clkdm = {
	.name		  = "l3_instr_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_L3INSTR_CDOFFS,
};

static struct clockdomain l3_init_44xx_clkdm = {
	.name		  = "l3_init_clkdm",
	.pwrdm		  = { .name = "l3init_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_L3INIT_INST,
	.clkdm_offs	  = OMAP4430_CM2_L3INIT_L3INIT_CDOFFS,
	.dep_bit	  = OMAP4430_L3INIT_STATDEP_SHIFT,
	.wkdep_srcs	  = l3_init_wkup_sleep_deps,
	.sleepdep_srcs	  = l3_init_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain d2d_44xx_clkdm = {
	.name		  = "d2d_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_D2D_CDOFFS,
	.wkdep_srcs	  = d2d_wkup_sleep_deps,
	.sleepdep_srcs	  = d2d_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain mpu0_44xx_clkdm = {
	.name		  = "mpu0_clkdm",
	.pwrdm		  = { .name = "cpu0_pwrdm" },
	.prcm_partition	  = OMAP4430_PRCM_MPU_PARTITION,
	.cm_inst	  = OMAP4430_PRCM_MPU_CPU0_INST,
	.clkdm_offs	  = OMAP4430_PRCM_MPU_CPU0_CPU0_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain mpu1_44xx_clkdm = {
	.name		  = "mpu1_clkdm",
	.pwrdm		  = { .name = "cpu1_pwrdm" },
	.prcm_partition	  = OMAP4430_PRCM_MPU_PARTITION,
	.cm_inst	  = OMAP4430_PRCM_MPU_CPU1_INST,
	.clkdm_offs	  = OMAP4430_PRCM_MPU_CPU1_CPU1_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l3_emif_44xx_clkdm = {
	.name		  = "l3_emif_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_MEMIF_CDOFFS,
	.dep_bit	  = OMAP4430_MEMIF_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l4_ao_44xx_clkdm = {
	.name		  = "l4_ao_clkdm",
	.pwrdm		  = { .name = "always_on_core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_ALWAYS_ON_INST,
	.clkdm_offs	  = OMAP4430_CM2_ALWAYS_ON_ALWON_CDOFFS,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain ducati_44xx_clkdm = {
	.name		  = "ducati_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_DUCATI_CDOFFS,
	.dep_bit	  = OMAP4430_DUCATI_STATDEP_SHIFT,
	.wkdep_srcs	  = ducati_wkup_sleep_deps,
	.sleepdep_srcs	  = ducati_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain mpu_44xx_clkdm = {
	.name		  = "mpuss_clkdm",
	.pwrdm		  = { .name = "mpu_pwrdm" },
	.prcm_partition	  = OMAP4430_CM1_PARTITION,
	.cm_inst	  = OMAP4430_CM1_MPU_INST,
	.clkdm_offs	  = OMAP4430_CM1_MPU_MPU_CDOFFS,
	.wkdep_srcs	  = mpu_wkup_sleep_deps,
	.sleepdep_srcs	  = mpu_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

static struct clockdomain l3_2_44xx_clkdm = {
	.name		  = "l3_2_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_L3_2_CDOFFS,
	.dep_bit	  = OMAP4430_L3_2_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP,
};

static struct clockdomain l3_1_44xx_clkdm = {
	.name		  = "l3_1_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_L3_1_CDOFFS,
	.dep_bit	  = OMAP4430_L3_1_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP,
};

static struct clockdomain iss_44xx_clkdm = {
	.name		  = "iss_clkdm",
	.pwrdm		  = { .name = "cam_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CAM_INST,
	.clkdm_offs	  = OMAP4430_CM2_CAM_CAM_CDOFFS,
	.wkdep_srcs	  = iss_wkup_sleep_deps,
	.sleepdep_srcs	  = iss_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_SWSUP,
};

static struct clockdomain l3_dss_44xx_clkdm = {
	.name		  = "l3_dss_clkdm",
	.pwrdm		  = { .name = "dss_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_DSS_INST,
	.clkdm_offs	  = OMAP4430_CM2_DSS_DSS_CDOFFS,
	.dep_bit	  = OMAP4430_DSS_STATDEP_SHIFT,
	.wkdep_srcs	  = l3_dss_wkup_sleep_deps,
	.sleepdep_srcs	  = l3_dss_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
};

static struct clockdomain l4_wkup_44xx_clkdm = {
	.name		  = "l4_wkup_clkdm",
	.pwrdm		  = { .name = "wkup_pwrdm" },
	.prcm_partition	  = OMAP4430_PRM_PARTITION,
	.cm_inst	  = OMAP4430_PRM_WKUP_CM_INST,
	.clkdm_offs	  = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
	.dep_bit	  = OMAP4430_L4WKUP_STATDEP_SHIFT,
	.flags		  = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
};

static struct clockdomain emu_sys_44xx_clkdm = {
	.name		  = "emu_sys_clkdm",
	.pwrdm		  = { .name = "emu_pwrdm" },
	.prcm_partition	  = OMAP4430_PRM_PARTITION,
	.cm_inst	  = OMAP4430_PRM_EMU_CM_INST,
	.clkdm_offs	  = OMAP4430_PRM_EMU_CM_EMU_CDOFFS,
	.flags		  = (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_FORCE_WAKEUP |
			     CLKDM_MISSING_IDLE_REPORTING),
};

static struct clockdomain l3_dma_44xx_clkdm = {
	.name		  = "l3_dma_clkdm",
	.pwrdm		  = { .name = "core_pwrdm" },
	.prcm_partition	  = OMAP4430_CM2_PARTITION,
	.cm_inst	  = OMAP4430_CM2_CORE_INST,
	.clkdm_offs	  = OMAP4430_CM2_CORE_SDMA_CDOFFS,
	.wkdep_srcs	  = l3_dma_wkup_sleep_deps,
	.sleepdep_srcs	  = l3_dma_wkup_sleep_deps,
	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};

/* As clockdomains are added or removed above, this list must also be changed */
static struct clockdomain *clockdomains_omap44xx[] __initdata = {
	&l4_cefuse_44xx_clkdm,
	&l4_cfg_44xx_clkdm,
	&tesla_44xx_clkdm,
	&l3_gfx_44xx_clkdm,
	&ivahd_44xx_clkdm,
	&l4_secure_44xx_clkdm,
	&l4_per_44xx_clkdm,
	&abe_44xx_clkdm,
	&l3_instr_44xx_clkdm,
	&l3_init_44xx_clkdm,
	&d2d_44xx_clkdm,
	&mpu0_44xx_clkdm,
	&mpu1_44xx_clkdm,
	&l3_emif_44xx_clkdm,
	&l4_ao_44xx_clkdm,
	&ducati_44xx_clkdm,
	&mpu_44xx_clkdm,
	&l3_2_44xx_clkdm,
	&l3_1_44xx_clkdm,
	&iss_44xx_clkdm,
	&l3_dss_44xx_clkdm,
	&l4_wkup_44xx_clkdm,
	&emu_sys_44xx_clkdm,
	&l3_dma_44xx_clkdm,
	NULL
};

/*
 * omap44xx_clockdomains_init - register the OMAP4 clockdomain data above
 * with the common clockdomain framework and finish framework setup.
 */
void __init omap44xx_clockdomains_init(void)
{
	clkdm_register_platform_funcs(&omap4_clkdm_operations);
	clkdm_register_clkdms(clockdomains_omap44xx);
	clkdm_complete_init();
}
linux-master
arch/arm/mach-omap2/clockdomains44xx_data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP2+ common Clock Management (CM) IP block functions
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 * Paul Walmsley
 *
 * XXX This code should eventually be moved to a CM driver.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "cm2xxx.h"
#include "cm3xxx.h"
#include "cm33xx.h"
#include "cm44xx.h"
#include "clock.h"

/*
 * cm_ll_data: function pointers to SoC-specific implementations of
 * common CM functions
 */
static struct cm_ll_data null_cm_ll_data;
static const struct cm_ll_data *cm_ll_data = &null_cm_ll_data;

/* cm_base: base virtual address of the CM IP block */
struct omap_domain_base cm_base;

/* cm2_base: base virtual address of the CM2 IP block (OMAP44xx only) */
struct omap_domain_base cm2_base;

/* Per-SoC init data flags: no clock provider / only one CM instance */
#define CM_NO_CLOCKS		0x1
#define CM_SINGLE_INSTANCE	0x2

/**
 * cm_split_idlest_reg - split CM_IDLEST reg addr into its components
 * @idlest_reg: CM_IDLEST* virtual address
 * @prcm_inst: pointer to an s16 to return the PRCM instance offset
 * @idlest_reg_id: pointer to a u8 to return the CM_IDLESTx register ID
 *
 * Given an absolute CM_IDLEST register address @idlest_reg, passes
 * the PRCM instance offset and IDLEST register ID back to the caller
 * via the @prcm_inst and @idlest_reg_id.  Returns -EINVAL upon error,
 * or 0 upon success.  XXX This function is only needed until absolute
 * register addresses are removed from the OMAP struct clk records.
 */
int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
			u8 *idlest_reg_id)
{
	int ret;

	if (!cm_ll_data->split_idlest_reg) {
		WARN_ONCE(1, "cm: %s: no low-level function defined\n",
			  __func__);
		return -EINVAL;
	}

	ret = cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
					   idlest_reg_id);
	/*
	 * NOTE(review): *prcm_inst is rebased even when the low-level call
	 * failed and may not have written it — callers appear expected to
	 * check ret before using *prcm_inst; confirm before relying on it.
	 */
	*prcm_inst -= cm_base.offset;
	return ret;
}

/**
 * omap_cm_wait_module_ready - wait for a module to leave idle or standby
 * @part: PRCM partition
 * @prcm_mod: PRCM module offset
 * @idlest_reg: CM_IDLESTx register
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Wait for the PRCM to indicate that the module identified by
 * (@prcm_mod, @idlest_id, @idlest_shift) is clocked.  Return 0 upon
 * success, -EBUSY if the module doesn't enable in time, or -EINVAL if
 * no per-SoC wait_module_ready() function pointer has been registered
 * or if the idlest register is unknown on the SoC.
 */
int omap_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_reg,
			      u8 idlest_shift)
{
	if (!cm_ll_data->wait_module_ready) {
		WARN_ONCE(1, "cm: %s: no low-level function defined\n",
			  __func__);
		return -EINVAL;
	}

	return cm_ll_data->wait_module_ready(part, prcm_mod, idlest_reg,
					     idlest_shift);
}

/**
 * omap_cm_wait_module_idle - wait for a module to enter idle or standby
 * @part: PRCM partition
 * @prcm_mod: PRCM module offset
 * @idlest_reg: CM_IDLESTx register
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Wait for the PRCM to indicate that the module identified by
 * (@prcm_mod, @idlest_id, @idlest_shift) is no longer clocked.  Return
 * 0 upon success, -EBUSY if the module doesn't enable in time, or
 * -EINVAL if no per-SoC wait_module_idle() function pointer has been
 * registered or if the idlest register is unknown on the SoC.
 */
int omap_cm_wait_module_idle(u8 part, s16 prcm_mod, u16 idlest_reg,
			     u8 idlest_shift)
{
	if (!cm_ll_data->wait_module_idle) {
		WARN_ONCE(1, "cm: %s: no low-level function defined\n",
			  __func__);
		return -EINVAL;
	}

	return cm_ll_data->wait_module_idle(part, prcm_mod, idlest_reg,
					    idlest_shift);
}

/**
 * omap_cm_module_enable - enable a module
 * @mode: target mode for the module
 * @part: PRCM partition
 * @inst: PRCM instance
 * @clkctrl_offs: CM_CLKCTRL register offset for the module
 *
 * Enables clocks for a module identified by (@part, @inst, @clkctrl_offs)
 * making its IO space accessible. Return 0 upon success, -EINVAL if no
 * per-SoC module_enable() function pointer has been registered.
 */
int omap_cm_module_enable(u8 mode, u8 part, u16 inst, u16 clkctrl_offs)
{
	if (!cm_ll_data->module_enable) {
		WARN_ONCE(1, "cm: %s: no low-level function defined\n",
			  __func__);
		return -EINVAL;
	}

	cm_ll_data->module_enable(mode, part, inst, clkctrl_offs);
	return 0;
}

/**
 * omap_cm_module_disable - disable a module
 * @part: PRCM partition
 * @inst: PRCM instance
 * @clkctrl_offs: CM_CLKCTRL register offset for the module
 *
 * Disables clocks for a module identified by (@part, @inst, @clkctrl_offs)
 * makings its IO space inaccessible. Return 0 upon success, -EINVAL if
 * no per-SoC module_disable() function pointer has been registered.
 */
int omap_cm_module_disable(u8 part, u16 inst, u16 clkctrl_offs)
{
	if (!cm_ll_data->module_disable) {
		WARN_ONCE(1, "cm: %s: no low-level function defined\n",
			  __func__);
		return -EINVAL;
	}

	cm_ll_data->module_disable(part, inst, clkctrl_offs);
	return 0;
}

/*
 * omap_cm_xlate_clkctrl - translate a CLKCTRL descriptor via the per-SoC
 * hook; returns 0 (and warns once) when no low-level implementation is
 * registered.
 */
u32 omap_cm_xlate_clkctrl(u8 part, u16 inst, u16 clkctrl_offs)
{
	if (!cm_ll_data->xlate_clkctrl) {
		WARN_ONCE(1, "cm: %s: no low-level function defined\n",
			  __func__);
		return 0;
	}
	return cm_ll_data->xlate_clkctrl(part, inst, clkctrl_offs);
}

/**
 * cm_register - register per-SoC low-level data with the CM
 * @cld: low-level per-SoC OMAP CM data & function pointers to register
 *
 * Register per-SoC low-level OMAP CM data and function pointers with
 * the OMAP CM common interface.  The caller must keep the data
 * pointed to by @cld valid until it calls cm_unregister() and
 * it returns successfully.  Returns 0 upon success, -EINVAL if @cld
 * is NULL, or -EEXIST if cm_register() has already been called
 * without an intervening cm_unregister().
 */
int cm_register(const struct cm_ll_data *cld)
{
	if (!cld)
		return -EINVAL;

	if (cm_ll_data != &null_cm_ll_data)
		return -EEXIST;

	cm_ll_data = cld;

	return 0;
}

/**
 * cm_unregister - unregister per-SoC low-level data & function pointers
 * @cld: low-level per-SoC OMAP CM data & function pointers to unregister
 *
 * Unregister per-SoC low-level OMAP CM data and function pointers
 * that were previously registered with cm_register().  The
 * caller may not destroy any of the data pointed to by @cld until
 * this function returns successfully.  Returns 0 upon success, or
 * -EINVAL if @cld is NULL or if @cld does not match the struct
 * cm_ll_data * previously registered by cm_register().
 */
int cm_unregister(const struct cm_ll_data *cld)
{
	if (!cld || cm_ll_data != cld)
		return -EINVAL;

	cm_ll_data = &null_cm_ll_data;

	return 0;
}

/*
 * Per-SoC PRCM init descriptors, matched against DT compatibles below.
 */
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static struct omap_prcm_init_data cm_data __initdata = {
	.index = TI_CLKM_CM,
	.init = omap4_cm_init,
};

static struct omap_prcm_init_data cm2_data __initdata = {
	.index = TI_CLKM_CM2,
	.init = omap4_cm_init,
};
#endif

#ifdef CONFIG_ARCH_OMAP2
static struct omap_prcm_init_data omap2_prcm_data __initdata = {
	.index = TI_CLKM_CM,
	.init = omap2xxx_cm_init,
	.flags = CM_NO_CLOCKS | CM_SINGLE_INSTANCE,
};
#endif

#ifdef CONFIG_ARCH_OMAP3
static struct omap_prcm_init_data omap3_cm_data __initdata = {
	.index = TI_CLKM_CM,
	.init = omap3xxx_cm_init,
	.flags = CM_SINGLE_INSTANCE,

	/*
	 * IVA2 offset is a negative value, must offset the cm_base address
	 * by this to get it to positive side on the iomap
	 */
	.offset = -OMAP3430_IVA2_MOD,
};
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_TI81XX)
static struct omap_prcm_init_data am3_prcm_data __initdata = {
	.index = TI_CLKM_CM,
	.flags = CM_NO_CLOCKS | CM_SINGLE_INSTANCE,
	.init = am33xx_cm_init,
};
#endif

#ifdef CONFIG_SOC_AM43XX
static struct omap_prcm_init_data am4_prcm_data __initdata = {
	.index = TI_CLKM_CM,
	.flags = CM_NO_CLOCKS | CM_SINGLE_INSTANCE,
	.init = omap4_cm_init,
};
#endif

static const struct of_device_id omap_cm_dt_match_table[] __initconst = {
#ifdef CONFIG_ARCH_OMAP2
	{ .compatible = "ti,omap2-prcm", .data = &omap2_prcm_data },
#endif
#ifdef CONFIG_ARCH_OMAP3
	{ .compatible = "ti,omap3-cm", .data = &omap3_cm_data },
#endif
#ifdef CONFIG_ARCH_OMAP4
	{ .compatible = "ti,omap4-cm1", .data = &cm_data },
	{ .compatible = "ti,omap4-cm2", .data = &cm2_data },
#endif
#ifdef CONFIG_SOC_OMAP5
	{ .compatible = "ti,omap5-cm-core-aon", .data = &cm_data },
	{ .compatible = "ti,omap5-cm-core", .data = &cm2_data },
#endif
#ifdef CONFIG_SOC_DRA7XX
	{ .compatible = "ti,dra7-cm-core-aon", .data = &cm_data },
	{ .compatible = "ti,dra7-cm-core", .data = &cm2_data },
#endif
#ifdef CONFIG_SOC_AM33XX
	{ .compatible = "ti,am3-prcm", .data = &am3_prcm_data },
#endif
#ifdef CONFIG_SOC_AM43XX
	{ .compatible = "ti,am4-prcm", .data = &am4_prcm_data },
#endif
#ifdef CONFIG_SOC_TI81XX
	{ .compatible = "ti,dm814-prcm", .data = &am3_prcm_data },
	{ .compatible = "ti,dm816-prcm", .data = &am3_prcm_data },
#endif
	{ }
};

/**
 * omap2_cm_base_init - initialize iomappings for the CM drivers
 *
 * Detects and initializes the iomappings for the CM driver, based
 * on the DT data. Returns 0 in success, negative error value
 * otherwise.
 */
int __init omap2_cm_base_init(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	struct omap_prcm_init_data *data;
	struct resource res;
	int ret;
	struct omap_domain_base *mem = NULL;

	for_each_matching_node_and_match(np, omap_cm_dt_match_table, &match) {
		data = (struct omap_prcm_init_data *)match->data;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			/* drop the iterator's node reference on early exit */
			of_node_put(np);
			return ret;
		}

		if (data->index == TI_CLKM_CM)
			mem = &cm_base;

		if (data->index == TI_CLKM_CM2)
			mem = &cm2_base;

		/* NOTE(review): ioremap() result is not checked for NULL */
		data->mem = ioremap(res.start, resource_size(&res));

		if (mem) {
			mem->pa = res.start + data->offset;
			mem->va = data->mem + data->offset;
			mem->offset = data->offset;
		}

		data->np = np;

		/*
		 * Run the per-SoC init either for single-instance SoCs, or
		 * once both CM and CM2 instances have been mapped.
		 */
		if (data->init && (data->flags & CM_SINGLE_INSTANCE ||
				   (cm_base.va && cm2_base.va)))
			data->init(data);
	}

	return 0;
}

/**
 * omap_cm_init - low level init for the CM drivers
 *
 * Initializes the low level clock infrastructure for CM drivers.
 * Returns 0 in success, negative error value in failure.
 */
int __init omap_cm_init(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	const struct omap_prcm_init_data *data;
	int ret;

	for_each_matching_node_and_match(np, omap_cm_dt_match_table, &match) {
		data = match->data;

		/* skip instances that do not provide clocks */
		if (data->flags & CM_NO_CLOCKS)
			continue;

		ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
		if (ret) {
			of_node_put(np);
			return ret;
		}
	}

	return 0;
}
linux-master
arch/arm/mach-omap2/cm_common.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP powerdomain control * * Copyright (C) 2007-2008, 2011 Texas Instruments, Inc. * Copyright (C) 2007-2011 Nokia Corporation * * Written by Paul Walmsley * Added OMAP4 specific support by Abhijit Pagare <[email protected]> * State counting code by Tero Kristo <[email protected]> */ #undef DEBUG #include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/spinlock.h> #include <trace/events/power.h> #include "cm2xxx_3xxx.h" #include "prcm44xx.h" #include "cm44xx.h" #include "prm2xxx_3xxx.h" #include "prm44xx.h" #include <asm/cpu.h> #include "powerdomain.h" #include "clockdomain.h" #include "voltage.h" #include "soc.h" #include "pm.h" #define PWRDM_TRACE_STATES_FLAG (1<<31) static void pwrdms_save_context(void); static void pwrdms_restore_context(void); enum { PWRDM_STATE_NOW = 0, PWRDM_STATE_PREV, }; /* * Types of sleep_switch used internally in omap_set_pwrdm_state() * and its associated static functions * * XXX Better documentation is needed here */ #define ALREADYACTIVE_SWITCH 0 #define FORCEWAKEUP_SWITCH 1 #define LOWPOWERSTATE_SWITCH 2 /* pwrdm_list contains all registered struct powerdomains */ static LIST_HEAD(pwrdm_list); static struct pwrdm_ops *arch_pwrdm; /* Private functions */ static struct powerdomain *_pwrdm_lookup(const char *name) { struct powerdomain *pwrdm, *temp_pwrdm; pwrdm = NULL; list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { if (!strcmp(name, temp_pwrdm->name)) { pwrdm = temp_pwrdm; break; } } return pwrdm; } /** * _pwrdm_register - register a powerdomain * @pwrdm: struct powerdomain * to register * * Adds a powerdomain to the internal powerdomain list. Returns * -EINVAL if given a null pointer, -EEXIST if a powerdomain is * already registered by the provided name, or 0 upon success. 
*/ static int _pwrdm_register(struct powerdomain *pwrdm) { int i; struct voltagedomain *voltdm; if (!pwrdm || !pwrdm->name) return -EINVAL; if (cpu_is_omap44xx() && pwrdm->prcm_partition == OMAP4430_INVALID_PRCM_PARTITION) { pr_err("powerdomain: %s: missing OMAP4 PRCM partition ID\n", pwrdm->name); return -EINVAL; } if (_pwrdm_lookup(pwrdm->name)) return -EEXIST; if (arch_pwrdm && arch_pwrdm->pwrdm_has_voltdm) if (!arch_pwrdm->pwrdm_has_voltdm()) goto skip_voltdm; voltdm = voltdm_lookup(pwrdm->voltdm.name); if (!voltdm) { pr_err("powerdomain: %s: voltagedomain %s does not exist\n", pwrdm->name, pwrdm->voltdm.name); return -EINVAL; } pwrdm->voltdm.ptr = voltdm; INIT_LIST_HEAD(&pwrdm->voltdm_node); skip_voltdm: spin_lock_init(&pwrdm->_lock); list_add(&pwrdm->node, &pwrdm_list); /* Initialize the powerdomain's state counter */ for (i = 0; i < PWRDM_MAX_PWRSTS; i++) pwrdm->state_counter[i] = 0; pwrdm->ret_logic_off_counter = 0; for (i = 0; i < pwrdm->banks; i++) pwrdm->ret_mem_off_counter[i] = 0; if (arch_pwrdm && arch_pwrdm->pwrdm_wait_transition) arch_pwrdm->pwrdm_wait_transition(pwrdm); pwrdm->state = pwrdm_read_pwrst(pwrdm); pwrdm->state_counter[pwrdm->state] = 1; pr_debug("powerdomain: registered %s\n", pwrdm->name); return 0; } static void _update_logic_membank_counters(struct powerdomain *pwrdm) { int i; u8 prev_logic_pwrst, prev_mem_pwrst; prev_logic_pwrst = pwrdm_read_prev_logic_pwrst(pwrdm); if ((pwrdm->pwrsts_logic_ret == PWRSTS_OFF_RET) && (prev_logic_pwrst == PWRDM_POWER_OFF)) pwrdm->ret_logic_off_counter++; for (i = 0; i < pwrdm->banks; i++) { prev_mem_pwrst = pwrdm_read_prev_mem_pwrst(pwrdm, i); if ((pwrdm->pwrsts_mem_ret[i] == PWRSTS_OFF_RET) && (prev_mem_pwrst == PWRDM_POWER_OFF)) pwrdm->ret_mem_off_counter[i]++; } } static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag) { int prev, next, state, trace_state = 0; if (pwrdm == NULL) return -EINVAL; state = pwrdm_read_pwrst(pwrdm); switch (flag) { case PWRDM_STATE_NOW: prev = pwrdm->state; 
break; case PWRDM_STATE_PREV: prev = pwrdm_read_prev_pwrst(pwrdm); if (prev >= 0 && pwrdm->state != prev) pwrdm->state_counter[prev]++; if (prev == PWRDM_POWER_RET) _update_logic_membank_counters(pwrdm); /* * If the power domain did not hit the desired state, * generate a trace event with both the desired and hit states */ next = pwrdm_read_next_pwrst(pwrdm); if (next != prev) { trace_state = (PWRDM_TRACE_STATES_FLAG | ((next & OMAP_POWERSTATE_MASK) << 8) | ((prev & OMAP_POWERSTATE_MASK) << 0)); trace_power_domain_target(pwrdm->name, trace_state, raw_smp_processor_id()); } break; default: return -EINVAL; } if (state != prev) pwrdm->state_counter[state]++; pm_dbg_update_time(pwrdm, prev); pwrdm->state = state; return 0; } static int _pwrdm_pre_transition_cb(struct powerdomain *pwrdm, void *unused) { pwrdm_clear_all_prev_pwrst(pwrdm); _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW); return 0; } static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused) { _pwrdm_state_switch(pwrdm, PWRDM_STATE_PREV); return 0; } /** * _pwrdm_save_clkdm_state_and_activate - prepare for power state change * @pwrdm: struct powerdomain * to operate on * @curr_pwrst: current power state of @pwrdm * @pwrst: power state to switch to * * Determine whether the powerdomain needs to be turned on before * attempting to switch power states. Called by * omap_set_pwrdm_state(). NOTE that if the powerdomain contains * multiple clockdomains, this code assumes that the first clockdomain * supports software-supervised wakeup mode - potentially a problem. * Returns the power state switch mode currently in use (see the * "Types of sleep_switch" comment above). 
*/ static u8 _pwrdm_save_clkdm_state_and_activate(struct powerdomain *pwrdm, u8 curr_pwrst, u8 pwrst) { u8 sleep_switch; if (curr_pwrst < PWRDM_POWER_ON) { if (curr_pwrst > pwrst && pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE && arch_pwrdm->pwrdm_set_lowpwrstchange) { sleep_switch = LOWPOWERSTATE_SWITCH; } else { clkdm_deny_idle_nolock(pwrdm->pwrdm_clkdms[0]); sleep_switch = FORCEWAKEUP_SWITCH; } } else { sleep_switch = ALREADYACTIVE_SWITCH; } return sleep_switch; } /** * _pwrdm_restore_clkdm_state - restore the clkdm hwsup state after pwrst change * @pwrdm: struct powerdomain * to operate on * @sleep_switch: return value from _pwrdm_save_clkdm_state_and_activate() * * Restore the clockdomain state perturbed by * _pwrdm_save_clkdm_state_and_activate(), and call the power state * bookkeeping code. Called by omap_set_pwrdm_state(). NOTE that if * the powerdomain contains multiple clockdomains, this assumes that * the first associated clockdomain supports either * hardware-supervised idle control in the register, or * software-supervised sleep. No return value. */ static void _pwrdm_restore_clkdm_state(struct powerdomain *pwrdm, u8 sleep_switch) { switch (sleep_switch) { case FORCEWAKEUP_SWITCH: clkdm_allow_idle_nolock(pwrdm->pwrdm_clkdms[0]); break; case LOWPOWERSTATE_SWITCH: if (pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE && arch_pwrdm->pwrdm_set_lowpwrstchange) arch_pwrdm->pwrdm_set_lowpwrstchange(pwrdm); pwrdm_state_switch_nolock(pwrdm); break; } } /* Public functions */ /** * pwrdm_register_platform_funcs - register powerdomain implementation fns * @po: func pointers for arch specific implementations * * Register the list of function pointers used to implement the * powerdomain functions on different OMAP SoCs. Should be called * before any other pwrdm_register*() function. Returns -EINVAL if * @po is null, -EEXIST if platform functions have already been * registered, or 0 upon success. 
*/ int pwrdm_register_platform_funcs(struct pwrdm_ops *po) { if (!po) return -EINVAL; if (arch_pwrdm) return -EEXIST; arch_pwrdm = po; return 0; } /** * pwrdm_register_pwrdms - register SoC powerdomains * @ps: pointer to an array of struct powerdomain to register * * Register the powerdomains available on a particular OMAP SoC. Must * be called after pwrdm_register_platform_funcs(). May be called * multiple times. Returns -EACCES if called before * pwrdm_register_platform_funcs(); -EINVAL if the argument @ps is * null; or 0 upon success. */ int pwrdm_register_pwrdms(struct powerdomain **ps) { struct powerdomain **p = NULL; if (!arch_pwrdm) return -EEXIST; if (!ps) return -EINVAL; for (p = ps; *p; p++) _pwrdm_register(*p); return 0; } static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v) { switch (cmd) { case CPU_CLUSTER_PM_ENTER: if (enable_off_mode) pwrdms_save_context(); break; case CPU_CLUSTER_PM_EXIT: if (enable_off_mode) pwrdms_restore_context(); break; } return NOTIFY_OK; } /** * pwrdm_complete_init - set up the powerdomain layer * * Do whatever is necessary to initialize registered powerdomains and * powerdomain code. Currently, this programs the next power state * for each powerdomain to ON. This prevents powerdomains from * unexpectedly losing context or entering high wakeup latency modes * with non-power-management-enabled kernels. Must be called after * pwrdm_register_pwrdms(). Returns -EACCES if called before * pwrdm_register_pwrdms(), or 0 upon success. 
*/ int pwrdm_complete_init(void) { struct powerdomain *temp_p; static struct notifier_block nb; if (list_empty(&pwrdm_list)) return -EACCES; list_for_each_entry(temp_p, &pwrdm_list, node) pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON); /* Only AM43XX can lose pwrdm context during rtc-ddr suspend */ if (soc_is_am43xx()) { nb.notifier_call = cpu_notifier; cpu_pm_register_notifier(&nb); } return 0; } /** * pwrdm_lock - acquire a Linux spinlock on a powerdomain * @pwrdm: struct powerdomain * to lock * * Acquire the powerdomain spinlock on @pwrdm. No return value. */ void pwrdm_lock(struct powerdomain *pwrdm) __acquires(&pwrdm->_lock) { spin_lock_irqsave(&pwrdm->_lock, pwrdm->_lock_flags); } /** * pwrdm_unlock - release a Linux spinlock on a powerdomain * @pwrdm: struct powerdomain * to unlock * * Release the powerdomain spinlock on @pwrdm. No return value. */ void pwrdm_unlock(struct powerdomain *pwrdm) __releases(&pwrdm->_lock) { spin_unlock_irqrestore(&pwrdm->_lock, pwrdm->_lock_flags); } /** * pwrdm_lookup - look up a powerdomain by name, return a pointer * @name: name of powerdomain * * Find a registered powerdomain by its name @name. Returns a pointer * to the struct powerdomain if found, or NULL otherwise. */ struct powerdomain *pwrdm_lookup(const char *name) { struct powerdomain *pwrdm; if (!name) return NULL; pwrdm = _pwrdm_lookup(name); return pwrdm; } /** * pwrdm_for_each - call function on each registered clockdomain * @fn: callback function * * * Call the supplied function @fn for each registered powerdomain. * The callback function @fn can return anything but 0 to bail out * early from the iterator. Returns the last return value of the * callback function, which should be 0 for success or anything else * to indicate failure; or -EINVAL if the function pointer is null. 
 */
int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
		   void *user)
{
	struct powerdomain *temp_pwrdm;
	int ret = 0;

	if (!fn)
		return -EINVAL;

	/* Stop iterating as soon as the callback reports non-zero. */
	list_for_each_entry(temp_pwrdm, &pwrdm_list, node) {
		ret = (*fn)(temp_pwrdm, user);
		if (ret)
			break;
	}

	return ret;
}

/**
 * pwrdm_add_clkdm - add a clockdomain to a powerdomain
 * @pwrdm: struct powerdomain * to add the clockdomain to
 * @clkdm: struct clockdomain * to associate with a powerdomain
 *
 * Associate the clockdomain @clkdm with a powerdomain @pwrdm.  This
 * enables the use of pwrdm_for_each_clkdm().  Returns -EINVAL if
 * presented with invalid pointers; -ENOMEM if memory could not be allocated;
 * or 0 upon success.
 */
int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm)
{
	int i;
	int ret = -EINVAL;

	if (!pwrdm || !clkdm)
		return -EINVAL;

	pr_debug("powerdomain: %s: associating clockdomain %s\n",
		 pwrdm->name, clkdm->name);

	/* Find the first free slot in the fixed-size clockdomain array. */
	for (i = 0; i < PWRDM_MAX_CLKDMS; i++) {
		if (!pwrdm->pwrdm_clkdms[i])
			break;
#ifdef DEBUG
		/* Catch duplicate associations in debug builds only. */
		if (pwrdm->pwrdm_clkdms[i] == clkdm) {
			ret = -EINVAL;
			goto pac_exit;
		}
#endif
	}

	if (i == PWRDM_MAX_CLKDMS) {
		pr_debug("powerdomain: %s: increase PWRDM_MAX_CLKDMS for clkdm %s\n",
			 pwrdm->name, clkdm->name);
		WARN_ON(1);
		ret = -ENOMEM;
		goto pac_exit;
	}

	pwrdm->pwrdm_clkdms[i] = clkdm;

	ret = 0;

pac_exit:
	return ret;
}

/**
 * pwrdm_get_mem_bank_count - get number of memory banks in this powerdomain
 * @pwrdm: struct powerdomain *
 *
 * Return the number of controllable memory banks in powerdomain @pwrdm,
 * starting with 1.  Returns -EINVAL if the powerdomain pointer is null.
 */
int pwrdm_get_mem_bank_count(struct powerdomain *pwrdm)
{
	if (!pwrdm)
		return -EINVAL;

	return pwrdm->banks;
}

/**
 * pwrdm_set_next_pwrst - set next powerdomain power state
 * @pwrdm: struct powerdomain * to set
 * @pwrst: one of the PWRDM_POWER_* macros
 *
 * Set the powerdomain @pwrdm's next power state to @pwrst.
 * The powerdomain may not enter this state immediately if the
 * preconditions for this state have not been satisfied.  Returns
 * -EINVAL if the powerdomain pointer is null or if the power state
 * is invalid for the powerdomain, or returns 0 upon success.
 */
int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	/* Reject states this powerdomain cannot enter. */
	if (!(pwrdm->pwrsts & (1 << pwrst)))
		return -EINVAL;

	pr_debug("powerdomain: %s: setting next powerstate to %0x\n",
		 pwrdm->name, pwrst);

	if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
		/* Trace the pwrdm desired target state */
		trace_power_domain_target(pwrdm->name, pwrst,
					  raw_smp_processor_id());
		/* Program the pwrdm desired target state */
		ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
	}

	return ret;
}

/**
 * pwrdm_read_next_pwrst - get next powerdomain power state
 * @pwrdm: struct powerdomain * to get power state
 *
 * Return the powerdomain @pwrdm's next power state.  Returns -EINVAL
 * if the powerdomain pointer is null or returns the next power state
 * upon success.
 */
int pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_next_pwrst)
		ret = arch_pwrdm->pwrdm_read_next_pwrst(pwrdm);

	return ret;
}

/**
 * pwrdm_read_pwrst - get current powerdomain power state
 * @pwrdm: struct powerdomain * to get power state
 *
 * Return the powerdomain @pwrdm's current power state.  Returns -EINVAL
 * if the powerdomain pointer is null or returns the current power state
 * upon success.  Note that if the power domain only supports the ON state
 * then just return ON as the current state.
 */
int pwrdm_read_pwrst(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	/* ON is the only supported state: no need to query the hardware. */
	if (pwrdm->pwrsts == PWRSTS_ON)
		return PWRDM_POWER_ON;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_pwrst)
		ret = arch_pwrdm->pwrdm_read_pwrst(pwrdm);

	return ret;
}

/**
 * pwrdm_read_prev_pwrst - get previous powerdomain power state
 * @pwrdm: struct powerdomain * to get previous power state
 *
 * Return the powerdomain @pwrdm's previous power state.  Returns -EINVAL
 * if the powerdomain pointer is null or returns the previous power state
 * upon success.
 */
int pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_prev_pwrst)
		ret = arch_pwrdm->pwrdm_read_prev_pwrst(pwrdm);

	return ret;
}

/**
 * pwrdm_set_logic_retst - set powerdomain logic power state upon retention
 * @pwrdm: struct powerdomain * to set
 * @pwrst: one of the PWRDM_POWER_* macros
 *
 * Set the next power state @pwrst that the logic portion of the
 * powerdomain @pwrdm will enter when the powerdomain enters retention.
 * This will be either RETENTION or OFF, if supported.  Returns
 * -EINVAL if the powerdomain pointer is null or the target power
 * state is not supported, or returns 0 upon success.
 */
int pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	/* Reject logic retention states the domain cannot enter. */
	if (!(pwrdm->pwrsts_logic_ret & (1 << pwrst)))
		return -EINVAL;

	pr_debug("powerdomain: %s: setting next logic powerstate to %0x\n",
		 pwrdm->name, pwrst);

	if (arch_pwrdm && arch_pwrdm->pwrdm_set_logic_retst)
		ret = arch_pwrdm->pwrdm_set_logic_retst(pwrdm, pwrst);

	return ret;
}

/**
 * pwrdm_set_mem_onst - set memory power state while powerdomain ON
 * @pwrdm: struct powerdomain * to set
 * @bank: memory bank number to set (0-3)
 * @pwrst: one of the PWRDM_POWER_* macros
 *
 * Set the next power state @pwrst that memory bank @bank of the
 * powerdomain @pwrdm will enter when the powerdomain enters the ON
 * state.
 * @bank will be a number from 0 to 3, and represents different
 * types of memory, depending on the powerdomain.  Returns -EINVAL if
 * the powerdomain pointer is null or the target power state is not
 * supported for this memory bank, -EEXIST if the target memory
 * bank does not exist or is not controllable, or returns 0 upon
 * success.
 */
int pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank, u8 pwrst)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	/* The bank index must address one of this domain's memory banks. */
	if (pwrdm->banks < (bank + 1))
		return -EEXIST;

	/* Reject per-bank states the domain cannot enter while ON. */
	if (!(pwrdm->pwrsts_mem_on[bank] & (1 << pwrst)))
		return -EINVAL;

	pr_debug("powerdomain: %s: setting next memory powerstate for bank %0x while pwrdm-ON to %0x\n",
		 pwrdm->name, bank, pwrst);

	if (arch_pwrdm && arch_pwrdm->pwrdm_set_mem_onst)
		ret = arch_pwrdm->pwrdm_set_mem_onst(pwrdm, bank, pwrst);

	return ret;
}

/**
 * pwrdm_set_mem_retst - set memory power state while powerdomain in RET
 * @pwrdm: struct powerdomain * to set
 * @bank: memory bank number to set (0-3)
 * @pwrst: one of the PWRDM_POWER_* macros
 *
 * Set the next power state @pwrst that memory bank @bank of the
 * powerdomain @pwrdm will enter when the powerdomain enters the
 * RETENTION state.  Bank will be a number from 0 to 3, and represents
 * different types of memory, depending on the powerdomain.  @pwrst
 * will be either RETENTION or OFF, if supported.  Returns -EINVAL if
 * the powerdomain pointer is null or the target power state is not
 * supported for this memory bank, -EEXIST if the target memory
 * bank does not exist or is not controllable, or returns 0 upon
 * success.
 */
int pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	/* The bank index must address one of this domain's memory banks. */
	if (pwrdm->banks < (bank + 1))
		return -EEXIST;

	/* Reject per-bank states the domain cannot enter in retention. */
	if (!(pwrdm->pwrsts_mem_ret[bank] & (1 << pwrst)))
		return -EINVAL;

	pr_debug("powerdomain: %s: setting next memory powerstate for bank %0x while pwrdm-RET to %0x\n",
		 pwrdm->name, bank, pwrst);

	if (arch_pwrdm && arch_pwrdm->pwrdm_set_mem_retst)
		ret = arch_pwrdm->pwrdm_set_mem_retst(pwrdm, bank, pwrst);

	return ret;
}

/**
 * pwrdm_read_logic_pwrst - get current powerdomain logic retention power state
 * @pwrdm: struct powerdomain * to get current logic retention power state
 *
 * Return the power state that the logic portion of powerdomain @pwrdm
 * will enter when the powerdomain enters retention.  Returns -EINVAL
 * if the powerdomain pointer is null or returns the logic retention
 * power state upon success.
 */
int pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_logic_pwrst)
		ret = arch_pwrdm->pwrdm_read_logic_pwrst(pwrdm);

	return ret;
}

/**
 * pwrdm_read_prev_logic_pwrst - get previous powerdomain logic power state
 * @pwrdm: struct powerdomain * to get previous logic power state
 *
 * Return the powerdomain @pwrdm's previous logic power state.  Returns
 * -EINVAL if the powerdomain pointer is null or returns the previous
 * logic power state upon success.
 */
int pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_prev_logic_pwrst)
		ret = arch_pwrdm->pwrdm_read_prev_logic_pwrst(pwrdm);

	return ret;
}

/**
 * pwrdm_read_logic_retst - get next powerdomain logic power state
 * @pwrdm: struct powerdomain * to get next logic power state
 *
 * Return the powerdomain pwrdm's logic power state.  Returns -EINVAL
 * if the powerdomain pointer is null or returns the next logic
 * power state upon success.
 */
int pwrdm_read_logic_retst(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return -EINVAL;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_logic_retst)
		ret = arch_pwrdm->pwrdm_read_logic_retst(pwrdm);

	return ret;
}

/**
 * pwrdm_read_mem_pwrst - get current memory bank power state
 * @pwrdm: struct powerdomain * to get current memory bank power state
 * @bank: memory bank number (0-3)
 *
 * Return the powerdomain @pwrdm's current memory power state for bank
 * @bank.  Returns -EINVAL if the powerdomain pointer is null, -EEXIST if
 * the target memory bank does not exist or is not controllable, or
 * returns the current memory power state upon success.
 */
int pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return ret;

	if (pwrdm->banks < (bank + 1))
		return ret;

	/* Quirk: remap any bank request to bank 1 when the flag is set. */
	if (pwrdm->flags & PWRDM_HAS_MPU_QUIRK)
		bank = 1;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_mem_pwrst)
		ret = arch_pwrdm->pwrdm_read_mem_pwrst(pwrdm, bank);

	return ret;
}

/**
 * pwrdm_read_prev_mem_pwrst - get previous memory bank power state
 * @pwrdm: struct powerdomain * to get previous memory bank power state
 * @bank: memory bank number (0-3)
 *
 * Return the powerdomain @pwrdm's previous memory power state for
 * bank @bank.  Returns -EINVAL if the powerdomain pointer is null,
 * -EEXIST if the target memory bank does not exist or is not
 * controllable, or returns the previous memory power state upon
 * success.
 */
int pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return ret;

	if (pwrdm->banks < (bank + 1))
		return ret;

	/* Quirk: remap any bank request to bank 1 when the flag is set. */
	if (pwrdm->flags & PWRDM_HAS_MPU_QUIRK)
		bank = 1;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_prev_mem_pwrst)
		ret = arch_pwrdm->pwrdm_read_prev_mem_pwrst(pwrdm, bank);

	return ret;
}

/**
 * pwrdm_read_mem_retst - get next memory bank power state
 * @pwrdm: struct powerdomain * to get next memory bank power state
 * @bank: memory bank number (0-3)
 *
 * Return the powerdomain pwrdm's next memory power state for bank
 * @bank.
 * Returns -EINVAL if the powerdomain pointer is null, -EEXIST if
 * the target memory bank does not exist or is not controllable, or
 * returns the next memory power state upon success.
 */
int pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return ret;

	if (pwrdm->banks < (bank + 1))
		return ret;

	if (arch_pwrdm && arch_pwrdm->pwrdm_read_mem_retst)
		ret = arch_pwrdm->pwrdm_read_mem_retst(pwrdm, bank);

	return ret;
}

/**
 * pwrdm_clear_all_prev_pwrst - clear previous powerstate register for a pwrdm
 * @pwrdm: struct powerdomain * to clear
 *
 * Clear the powerdomain's previous power state register @pwrdm.
 * Clears the entire register, including logic and memory bank
 * previous power states.  Returns -EINVAL if the powerdomain pointer
 * is null, or returns 0 upon success.
 */
int pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return ret;

	/*
	 * XXX should get the powerdomain's current state here;
	 * warn & fail if it is not ON.
	 */

	pr_debug("powerdomain: %s: clearing previous power state reg\n",
		 pwrdm->name);

	if (arch_pwrdm && arch_pwrdm->pwrdm_clear_all_prev_pwrst)
		ret = arch_pwrdm->pwrdm_clear_all_prev_pwrst(pwrdm);

	return ret;
}

/**
 * pwrdm_enable_hdwr_sar - enable automatic hardware SAR for a pwrdm
 * @pwrdm: struct powerdomain *
 *
 * Enable automatic context save-and-restore upon power state change
 * for some devices in the powerdomain @pwrdm.  Warning: this only
 * affects a subset of devices in a powerdomain; check the TRM
 * closely.  Returns -EINVAL if the powerdomain pointer is null or if
 * the powerdomain does not support automatic save-and-restore, or
 * returns 0 upon success.
 */
int pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return ret;

	if (!(pwrdm->flags & PWRDM_HAS_HDWR_SAR))
		return ret;

	pr_debug("powerdomain: %s: setting SAVEANDRESTORE bit\n", pwrdm->name);

	if (arch_pwrdm && arch_pwrdm->pwrdm_enable_hdwr_sar)
		ret = arch_pwrdm->pwrdm_enable_hdwr_sar(pwrdm);

	return ret;
}

/**
 * pwrdm_disable_hdwr_sar - disable automatic hardware SAR for a pwrdm
 * @pwrdm: struct powerdomain *
 *
 * Disable automatic context save-and-restore upon power state change
 * for some devices in the powerdomain @pwrdm.  Warning: this only
 * affects a subset of devices in a powerdomain; check the TRM
 * closely.  Returns -EINVAL if the powerdomain pointer is null or if
 * the powerdomain does not support automatic save-and-restore, or
 * returns 0 upon success.
 */
int pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm)
{
	int ret = -EINVAL;

	if (!pwrdm)
		return ret;

	if (!(pwrdm->flags & PWRDM_HAS_HDWR_SAR))
		return ret;

	pr_debug("powerdomain: %s: clearing SAVEANDRESTORE bit\n", pwrdm->name);

	if (arch_pwrdm && arch_pwrdm->pwrdm_disable_hdwr_sar)
		ret = arch_pwrdm->pwrdm_disable_hdwr_sar(pwrdm);

	return ret;
}

/**
 * pwrdm_has_hdwr_sar - test whether powerdomain supports hardware SAR
 * @pwrdm: struct powerdomain *
 *
 * Returns 1 if powerdomain @pwrdm supports hardware save-and-restore
 * for some devices, or 0 if it does not.
 */
bool pwrdm_has_hdwr_sar(struct powerdomain *pwrdm)
{
	return (pwrdm && pwrdm->flags & PWRDM_HAS_HDWR_SAR) ? 1 : 0;
}

/*
 * Record the powerdomain's current state.  Caller must hold the
 * powerdomain lock (see pwrdm_lock()).
 */
int pwrdm_state_switch_nolock(struct powerdomain *pwrdm)
{
	int ret;

	if (!pwrdm || !arch_pwrdm)
		return -EINVAL;

	/* Wait for any in-progress transition before sampling the state. */
	ret = arch_pwrdm->pwrdm_wait_transition(pwrdm);
	if (!ret)
		ret = _pwrdm_state_switch(pwrdm, PWRDM_STATE_NOW);

	return ret;
}

/* Locked wrapper around pwrdm_state_switch_nolock(). */
int __deprecated pwrdm_state_switch(struct powerdomain *pwrdm)
{
	int ret;

	pwrdm_lock(pwrdm);
	ret = pwrdm_state_switch_nolock(pwrdm);
	pwrdm_unlock(pwrdm);

	return ret;
}

/*
 * Run the pre-transition callback on @pwrdm, or on every registered
 * powerdomain when @pwrdm is NULL.  Always returns 0.
 */
int pwrdm_pre_transition(struct powerdomain *pwrdm)
{
	if (pwrdm)
		_pwrdm_pre_transition_cb(pwrdm, NULL);
	else
		pwrdm_for_each(_pwrdm_pre_transition_cb, NULL);

	return 0;
}

/*
 * Run the post-transition callback on @pwrdm, or on every registered
 * powerdomain when @pwrdm is NULL.  Always returns 0.
 */
int pwrdm_post_transition(struct powerdomain *pwrdm)
{
	if (pwrdm)
		_pwrdm_post_transition_cb(pwrdm, NULL);
	else
		pwrdm_for_each(_pwrdm_post_transition_cb, NULL);

	return 0;
}

/**
 * pwrdm_get_valid_lp_state() - Find best match deep power state
 * @pwrdm: power domain for which we want to find best match
 * @is_logic_state: Are we looking for logic state match here? Should
 *		    be one of PWRDM_xxx macro values
 * @req_state: requested power state
 *
 * Returns: closest match for requested power state. default fallback
 * is RET for logic state and ON for power state.
 *
 * This does a search from the power domain data looking for the
 * closest valid power domain state that the hardware can achieve.
 * PRCM definitions for PWRSTCTRL allows us to program whatever
 * configuration we'd like, and PRCM will actually attempt such
 * a transition, however if the powerdomain does not actually support it,
 * we endup with a hung system. The valid power domain states are already
 * available in our powerdomain data files. So this function tries to do
 * the following:
 * a) find if we have an exact match to the request - no issues.
 * b) else find if a deeper power state is possible.
 * c) failing which, it tries to find closest higher power state for the
 * request.
 */
u8 pwrdm_get_valid_lp_state(struct powerdomain *pwrdm,
			    bool is_logic_state, u8 req_state)
{
	u8 pwrdm_states = is_logic_state ? pwrdm->pwrsts_logic_ret :
			pwrdm->pwrsts;
	/* For logic, ret is highest and others, ON is highest */
	u8 default_pwrst = is_logic_state ? PWRDM_POWER_RET : PWRDM_POWER_ON;
	u8 new_pwrst;
	bool found;

	/* If it is already supported, nothing to search */
	if (pwrdm_states & BIT(req_state))
		return req_state;

	if (!req_state)
		goto up_search;

	/*
	 * So, we dont have a exact match
	 * Can we get a deeper power state match?
	 */
	new_pwrst = req_state - 1;
	found = true;
	while (!(pwrdm_states & BIT(new_pwrst))) {
		/* No match even at OFF? Not available */
		if (new_pwrst == PWRDM_POWER_OFF) {
			found = false;
			break;
		}
		new_pwrst--;
	}

	if (found)
		goto done;

up_search:
	/* OK, no deeper ones, can we get a higher match? */
	new_pwrst = req_state + 1;
	while (!(pwrdm_states & BIT(new_pwrst))) {
		if (new_pwrst > PWRDM_POWER_ON) {
			WARN(1, "powerdomain: %s: Fix max powerstate to ON\n",
			     pwrdm->name);
			return PWRDM_POWER_ON;
		}

		if (new_pwrst == default_pwrst)
			break;
		new_pwrst++;
	}
done:
	return new_pwrst;
}

/**
 * omap_set_pwrdm_state - change a powerdomain's current power state
 * @pwrdm: struct powerdomain * to change the power state of
 * @pwrst: power state to change to
 *
 * Change the current hardware power state of the powerdomain
 * represented by @pwrdm to the power state represented by @pwrst.
 * Returns -EINVAL if @pwrdm is null or invalid or if the
 * powerdomain's current power state could not be read, or returns 0
 * upon success or if @pwrdm does not support @pwrst or any
 * lower-power state.  XXX Should not return 0 if the @pwrdm does not
 * support @pwrst or any lower-power state: this should be an error.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 pwrst)
{
	u8 next_pwrst, sleep_switch;
	int curr_pwrst;
	int ret = 0;

	if (!pwrdm || IS_ERR(pwrdm))
		return -EINVAL;

	/* Fall back to the deepest supported state at or below @pwrst. */
	while (!(pwrdm->pwrsts & (1 << pwrst))) {
		if (pwrst == PWRDM_POWER_OFF)
			return ret;
		pwrst--;
	}

	pwrdm_lock(pwrdm);

	curr_pwrst = pwrdm_read_pwrst(pwrdm);
	if (curr_pwrst < 0) {
		ret = -EINVAL;
		goto osps_out;
	}

	next_pwrst = pwrdm_read_next_pwrst(pwrdm);
	/* Nothing to do if both current and programmed states match. */
	if (curr_pwrst == pwrst && next_pwrst == pwrst)
		goto osps_out;

	sleep_switch = _pwrdm_save_clkdm_state_and_activate(pwrdm, curr_pwrst,
							    pwrst);

	ret = pwrdm_set_next_pwrst(pwrdm, pwrst);
	if (ret)
		pr_err("%s: unable to set power state of powerdomain: %s\n",
		       __func__, pwrdm->name);

	_pwrdm_restore_clkdm_state(pwrdm, sleep_switch);

osps_out:
	pwrdm_unlock(pwrdm);

	return ret;
}

/**
 * pwrdm_save_context - save powerdomain registers
 * @pwrdm: struct powerdomain * to save
 * @unused: unused iterator cookie (see pwrdm_for_each())
 *
 * Register state is going to be lost due to a suspend or hibernate
 * event. Save the powerdomain registers.
 */
static int pwrdm_save_context(struct powerdomain *pwrdm, void *unused)
{
	if (arch_pwrdm && arch_pwrdm->pwrdm_save_context)
		arch_pwrdm->pwrdm_save_context(pwrdm);
	return 0;
}

/**
 * pwrdm_restore_context - restore powerdomain registers
 * @pwrdm: struct powerdomain * to restore
 * @unused: unused iterator cookie (see pwrdm_for_each())
 *
 * Restore powerdomain control registers after a suspend or resume
 * event.
 */
static int pwrdm_restore_context(struct powerdomain *pwrdm, void *unused)
{
	if (arch_pwrdm && arch_pwrdm->pwrdm_restore_context)
		arch_pwrdm->pwrdm_restore_context(pwrdm);
	return 0;
}

/* Save every registered powerdomain's registers (cluster PM entry). */
static void pwrdms_save_context(void)
{
	pwrdm_for_each(pwrdm_save_context, NULL);
}

/* Restore every registered powerdomain's registers (cluster PM exit). */
static void pwrdms_restore_context(void)
{
	pwrdm_for_each(pwrdm_restore_context, NULL);
}
linux-master
arch/arm/mach-omap2/powerdomain.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AM33XX Clock Domain data.
 *
 * Copyright (C) 2011-2012 Texas Instruments Incorporated - https://www.ti.com/
 * Vaibhav Hiremath <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/io.h>

#include "clockdomain.h"
#include "cm.h"
#include "cm33xx.h"
#include "cm-regbits-33xx.h"

/*
 * Static clockdomain descriptors for AM33XX.  Each entry names its
 * parent powerdomain and the CM instance + CM_CLKSTCTRL register
 * offset for the domain; all of them allow software-supervised
 * wakeup/sleep (CLKDM_CAN_SWSUP).
 */

static struct clockdomain l4ls_am33xx_clkdm = {
	.name		= "l4ls_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l3s_am33xx_clkdm = {
	.name		= "l3s_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_L3S_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4fw_am33xx_clkdm = {
	.name		= "l4fw_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_L4FW_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l3_am33xx_clkdm = {
	.name		= "l3_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_L3_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4hs_am33xx_clkdm = {
	.name		= "l4hs_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_L4HS_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain ocpwp_l3_am33xx_clkdm = {
	.name		= "ocpwp_l3_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain pruss_ocp_am33xx_clkdm = {
	.name		= "pruss_ocp_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_PRUSS_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain cpsw_125mhz_am33xx_clkdm = {
	.name		= "cpsw_125mhz_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_CPSW_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain lcdc_am33xx_clkdm = {
	.name		= "lcdc_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_LCDC_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain clk_24mhz_am33xx_clkdm = {
	.name		= "clk_24mhz_clkdm",
	.pwrdm		= { .name = "per_pwrdm" },
	.cm_inst	= AM33XX_CM_PER_MOD,
	.clkdm_offs	= AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4_wkup_am33xx_clkdm = {
	.name		= "l4_wkup_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.cm_inst	= AM33XX_CM_WKUP_MOD,
	.clkdm_offs	= AM33XX_CM_WKUP_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l3_aon_am33xx_clkdm = {
	.name		= "l3_aon_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.cm_inst	= AM33XX_CM_WKUP_MOD,
	.clkdm_offs	= AM33XX_CM_L3_AON_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4_wkup_aon_am33xx_clkdm = {
	.name		= "l4_wkup_aon_clkdm",
	.pwrdm		= { .name = "wkup_pwrdm" },
	.cm_inst	= AM33XX_CM_WKUP_MOD,
	.clkdm_offs	= AM33XX_CM_L4_WKUP_AON_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain mpu_am33xx_clkdm = {
	.name		= "mpu_clkdm",
	.pwrdm		= { .name = "mpu_pwrdm" },
	.cm_inst	= AM33XX_CM_MPU_MOD,
	.clkdm_offs	= AM33XX_CM_MPU_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4_rtc_am33xx_clkdm = {
	.name		= "l4_rtc_clkdm",
	.pwrdm		= { .name = "rtc_pwrdm" },
	.cm_inst	= AM33XX_CM_RTC_MOD,
	.clkdm_offs	= AM33XX_CM_RTC_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain gfx_l3_am33xx_clkdm = {
	.name		= "gfx_l3_clkdm",
	.pwrdm		= { .name = "gfx_pwrdm" },
	.cm_inst	= AM33XX_CM_GFX_MOD,
	.clkdm_offs	= AM33XX_CM_GFX_L3_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain gfx_l4ls_gfx_am33xx_clkdm = {
	.name		= "gfx_l4ls_gfx_clkdm",
	.pwrdm		= { .name = "gfx_pwrdm" },
	.cm_inst	= AM33XX_CM_GFX_MOD,
	.clkdm_offs	= AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

static struct clockdomain l4_cefuse_am33xx_clkdm = {
	.name		= "l4_cefuse_clkdm",
	.pwrdm		= { .name = "cefuse_pwrdm" },
	.cm_inst	= AM33XX_CM_CEFUSE_MOD,
	.clkdm_offs	= AM33XX_CM_CEFUSE_CLKSTCTRL_OFFSET,
	.flags		= CLKDM_CAN_SWSUP,
};

/* NULL-terminated list handed to the clockdomain core at init time */
static struct clockdomain *clockdomains_am33xx[] __initdata = {
	&l4ls_am33xx_clkdm,
	&l3s_am33xx_clkdm,
	&l4fw_am33xx_clkdm,
	&l3_am33xx_clkdm,
	&l4hs_am33xx_clkdm,
	&ocpwp_l3_am33xx_clkdm,
	&pruss_ocp_am33xx_clkdm,
	&cpsw_125mhz_am33xx_clkdm,
	&lcdc_am33xx_clkdm,
	&clk_24mhz_am33xx_clkdm,
	&l4_wkup_am33xx_clkdm,
	&l3_aon_am33xx_clkdm,
	&l4_wkup_aon_am33xx_clkdm,
	&mpu_am33xx_clkdm,
	&l4_rtc_am33xx_clkdm,
	&gfx_l3_am33xx_clkdm,
	&gfx_l4ls_gfx_am33xx_clkdm,
	&l4_cefuse_am33xx_clkdm,
	NULL,
};

/*
 * am33xx_clockdomains_init - register the AM33XX clockdomain ops and
 * the clockdomain list above with the clockdomain core.
 */
void __init am33xx_clockdomains_init(void)
{
	clkdm_register_platform_funcs(&am33xx_clkdm_operations);
	clkdm_register_clkdms(clockdomains_am33xx);
	clkdm_complete_init();
}
linux-master
arch/arm/mach-omap2/clockdomains33xx_data.c
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP3/OMAP4 Voltage Management Routines * * Author: Thara Gopinath <[email protected]> * * Copyright (C) 2007 Texas Instruments, Inc. * Rajendra Nayak <[email protected]> * Lesly A M <[email protected]> * * Copyright (C) 2008 Nokia Corporation * Kalle Jokiniemi * * Copyright (C) 2010 Texas Instruments, Inc. * Thara Gopinath <[email protected]> */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/init.h> #include "common.h" #include "soc.h" #include "prm-regbits-44xx.h" #include "prm44xx.h" #include "prcm44xx.h" #include "prminst44xx.h" #include "voltage.h" #include "omap_opp_data.h" #include "vc.h" #include "vp.h" static const struct omap_vfsm_instance omap4_vdd_mpu_vfsm = { .voltsetup_reg = OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET, .voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET, }; static const struct omap_vfsm_instance omap4_vdd_iva_vfsm = { .voltsetup_reg = OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET, .voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET, }; static const struct omap_vfsm_instance omap4_vdd_core_vfsm = { .voltsetup_reg = OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET, .voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET, }; static struct voltagedomain omap4_voltdm_mpu = { .name = "mpu", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_mpu, .vfsm = &omap4_vdd_mpu_vfsm, .vp = &omap4_vp_mpu, }; static struct voltagedomain omap4_voltdm_iva = { .name = "iva", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_iva, .vfsm = &omap4_vdd_iva_vfsm, .vp = &omap4_vp_iva, }; static struct voltagedomain omap4_voltdm_core = { .name = "core", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_core, .vfsm = &omap4_vdd_core_vfsm, .vp = &omap4_vp_core, }; static struct voltagedomain 
omap4_voltdm_wkup = { .name = "wakeup", }; static struct voltagedomain *voltagedomains_omap4[] __initdata = { &omap4_voltdm_mpu, &omap4_voltdm_iva, &omap4_voltdm_core, &omap4_voltdm_wkup, NULL, }; static const char *const sys_clk_name __initconst = "sys_clkin_ck"; void __init omap44xx_voltagedomains_init(void) { struct voltagedomain *voltdm; int i; /* * XXX Will depend on the process, validation, and binning * for the currently-running IC */ #ifdef CONFIG_PM_OPP if (cpu_is_omap443x()) { omap4_voltdm_mpu.volt_data = omap443x_vdd_mpu_volt_data; omap4_voltdm_iva.volt_data = omap443x_vdd_iva_volt_data; omap4_voltdm_core.volt_data = omap443x_vdd_core_volt_data; } else if (cpu_is_omap446x()) { omap4_voltdm_mpu.volt_data = omap446x_vdd_mpu_volt_data; omap4_voltdm_iva.volt_data = omap446x_vdd_iva_volt_data; omap4_voltdm_core.volt_data = omap446x_vdd_core_volt_data; } #endif omap4_voltdm_mpu.vp_param = &omap4_mpu_vp_data; omap4_voltdm_iva.vp_param = &omap4_iva_vp_data; omap4_voltdm_core.vp_param = &omap4_core_vp_data; omap4_voltdm_mpu.vc_param = &omap4_mpu_vc_data; omap4_voltdm_iva.vc_param = &omap4_iva_vc_data; omap4_voltdm_core.vc_param = &omap4_core_vc_data; for (i = 0; voltdm = voltagedomains_omap4[i], voltdm; i++) voltdm->sys_clk.name = sys_clk_name; voltdm_init(voltagedomains_omap4); };
linux-master
arch/arm/mach-omap2/voltagedomains44xx_data.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * OMAP2+ MPU WD_TIMER-specific code * * Copyright (C) 2012 Texas Instruments, Inc. */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/err.h> #include <linux/platform_data/omap-wd-timer.h> #include "omap_hwmod.h" #include "omap_device.h" #include "wd_timer.h" #include "common.h" #include "prm.h" #include "soc.h" /* * In order to avoid any assumptions from bootloader regarding WDT * settings, WDT module is reset during init. This enables the watchdog * timer. Hence it is required to disable the watchdog after the WDT reset * during init. Otherwise the system would reboot as per the default * watchdog timer registers settings. */ #define OMAP_WDT_WPS 0x34 #define OMAP_WDT_SPR 0x48 int omap2_wd_timer_disable(struct omap_hwmod *oh) { void __iomem *base; if (!oh) { pr_err("%s: Could not look up wdtimer_hwmod\n", __func__); return -EINVAL; } base = omap_hwmod_get_mpu_rt_va(oh); if (!base) { pr_err("%s: Could not get the base address for %s\n", oh->name, __func__); return -EINVAL; } /* sequence required to disable watchdog */ writel_relaxed(0xAAAA, base + OMAP_WDT_SPR); while (readl_relaxed(base + OMAP_WDT_WPS) & 0x10) cpu_relax(); writel_relaxed(0x5555, base + OMAP_WDT_SPR); while (readl_relaxed(base + OMAP_WDT_WPS) & 0x10) cpu_relax(); return 0; } /** * omap2_wdtimer_reset - reset and disable the WDTIMER IP block * @oh: struct omap_hwmod * * * After the WDTIMER IP blocks are reset on OMAP2/3, we must also take * care to execute the special watchdog disable sequence. This is * because the watchdog is re-armed upon OCP softreset. (On OMAP4, * this behavior was apparently changed and the watchdog is no longer * re-armed after an OCP soft-reset.) Returns -ETIMEDOUT if the reset * did not complete, or 0 upon success. * * XXX Most of this code should be moved to the omap_hwmod.c layer * during a normal merge window. 
omap_hwmod_softreset() should be * renamed to omap_hwmod_set_ocp_softreset(), and omap_hwmod_softreset() * should call the hwmod _ocp_softreset() code. */ int omap2_wd_timer_reset(struct omap_hwmod *oh) { int c = 0; /* Write to the SOFTRESET bit */ omap_hwmod_softreset(oh); /* Poll on RESETDONE bit */ omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs) & SYSS_RESETDONE_MASK), MAX_MODULE_SOFTRESET_WAIT, c); if (oh->class->sysc->srst_udelay) udelay(oh->class->sysc->srst_udelay); if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warn("%s: %s: softreset failed (waited %d usec)\n", __func__, oh->name, MAX_MODULE_SOFTRESET_WAIT); else pr_debug("%s: %s: softreset in %d usec\n", __func__, oh->name, c); return (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : omap2_wd_timer_disable(oh); }
linux-master
arch/arm/mach-omap2/wd_timer.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3xxx CM module functions
 *
 * Copyright (C) 2009 Nokia Corporation
 * Copyright (C) 2008-2010, 2012 Texas Instruments, Inc.
 * Paul Walmsley
 * Rajendra Nayak <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>

#include "prm2xxx_3xxx.h"
#include "cm.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "clockdomain.h"

/*
 * CM_IDLEST* register offsets, indexed by (idlest_id - 1); used by
 * omap3xxx_cm_wait_module_ready() and omap3xxx_cm_split_idlest_reg().
 */
static const u8 omap3xxx_cm_idlest_offs[] = {
	CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3
};

/*
 * Low-level CM_CLKSTCTRL field accessors
 */

/* Read-modify-write the CM_CLKSTCTRL field @mask of @module to value @c */
static void _write_clktrctrl(u8 c, s16 module, u32 mask)
{
	u32 v;

	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
	v &= ~mask;
	v |= c << __ffs(mask);
	omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL);
}

/*
 * Return true (1) if the clockdomain selected by (@module, @mask) is in
 * hardware-supervised (ENABLE_AUTO) idle mode, 0 otherwise.
 */
static bool omap3xxx_cm_is_clkdm_in_hwsup(s16 module, u32 mask)
{
	u32 v;

	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
	v &= mask;
	v >>= __ffs(mask);

	return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? 1 : 0;
}

/* Put the clockdomain (@module, @mask) into hardware-supervised idle */
static void omap3xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
}

/* Take the clockdomain (@module, @mask) out of hardware-supervised idle */
static void omap3xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
}

/* Force the clockdomain (@module, @mask) to sleep (software-forced idle) */
static void omap3xxx_cm_clkdm_force_sleep(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, module, mask);
}

/* Force the clockdomain (@module, @mask) awake (software-forced wakeup) */
static void omap3xxx_cm_clkdm_force_wakeup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, module, mask);
}

/*
 * Module-ready and IDLEST register helpers
 */

/**
 * omap3xxx_cm_wait_module_ready - wait for a module to leave idle or standby
 * @part: PRCM partition, ignored for OMAP3
 * @prcm_mod: PRCM module offset
 * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Wait for the PRCM to indicate that the module identified by
 * (@prcm_mod, @idlest_id, @idlest_shift) is clocked.  Return 0 upon
 * success or -EBUSY if the module doesn't enable in time.
 */
static int omap3xxx_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_id,
					 u8 idlest_shift)
{
	int ena = 0, i = 0;
	u8 cm_idlest_reg;
	u32 mask;

	if (!idlest_id || (idlest_id > ARRAY_SIZE(omap3xxx_cm_idlest_offs)))
		return -EINVAL;

	cm_idlest_reg = omap3xxx_cm_idlest_offs[idlest_id - 1];

	mask = 1 << idlest_shift;
	/* On OMAP3 an IDLEST bit value of 0 means "module ready" */
	ena = 0;

	omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, cm_idlest_reg) &
			    mask) == ena),
			  MAX_MODULE_READY_TIME, i);

	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}

/**
 * omap3xxx_cm_split_idlest_reg - split CM_IDLEST reg addr into its components
 * @idlest_reg: CM_IDLEST* virtual address
 * @prcm_inst: pointer to an s16 to return the PRCM instance offset
 * @idlest_reg_id: pointer to a u8 to return the CM_IDLESTx register ID
 *
 * XXX This function is only needed until absolute register addresses are
 * removed from the OMAP struct clk records.
 */
static int omap3xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg,
					s16 *prcm_inst,
					u8 *idlest_reg_id)
{
	unsigned long offs;
	u8 idlest_offs;
	int i;

	/* Low byte of the offset selects which CM_IDLESTx register */
	idlest_offs = idlest_reg->offset & 0xff;
	for (i = 0; i < ARRAY_SIZE(omap3xxx_cm_idlest_offs); i++) {
		if (idlest_offs == omap3xxx_cm_idlest_offs[i]) {
			*idlest_reg_id = i + 1;
			break;
		}
	}

	if (i == ARRAY_SIZE(omap3xxx_cm_idlest_offs))
		return -EINVAL;

	/* High byte of the offset is the PRCM instance offset */
	offs = idlest_reg->offset;
	offs &= 0xff00;
	*prcm_inst = offs;

	return 0;
}

/* Clockdomain low-level operations */

/* Set clkdm2's bit in clkdm1's CM_SLEEPDEP register (add sleep dependency) */
static int omap3xxx_clkdm_add_sleepdep(struct clockdomain *clkdm1,
				       struct clockdomain *clkdm2)
{
	omap2_cm_set_mod_reg_bits((1 << clkdm2->dep_bit),
				  clkdm1->pwrdm.ptr->prcm_offs,
				  OMAP3430_CM_SLEEPDEP);
	return 0;
}

/* Clear clkdm2's bit in clkdm1's CM_SLEEPDEP register (remove dependency) */
static int omap3xxx_clkdm_del_sleepdep(struct clockdomain *clkdm1,
				       struct clockdomain *clkdm2)
{
	omap2_cm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
				    clkdm1->pwrdm.ptr->prcm_offs,
				    OMAP3430_CM_SLEEPDEP);
	return 0;
}

/* Return nonzero if clkdm1 has a sleep dependency on clkdm2 */
static int omap3xxx_clkdm_read_sleepdep(struct clockdomain *clkdm1,
					struct clockdomain *clkdm2)
{
	return omap2_cm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs,
					    OMAP3430_CM_SLEEPDEP,
					    (1 << clkdm2->dep_bit));
}

/* Clear all sleep dependencies of @clkdm and zero the usecounts */
static int omap3xxx_clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
{
	struct clkdm_dep *cd;
	u32 mask = 0;

	for (cd = clkdm->sleepdep_srcs; cd && cd->clkdm_name; cd++) {
		if (!cd->clkdm)
			continue; /* only happens if data is erroneous */

		mask |= 1 << cd->clkdm->dep_bit;
		cd->sleepdep_usecount = 0;
	}
	omap2_cm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,
				    OMAP3430_CM_SLEEPDEP);
	return 0;
}

/* Software-force @clkdm to sleep */
static int omap3xxx_clkdm_sleep(struct clockdomain *clkdm)
{
	omap3xxx_cm_clkdm_force_sleep(clkdm->pwrdm.ptr->prcm_offs,
				      clkdm->clktrctrl_mask);
	return 0;
}

/* Software-force @clkdm awake */
static int omap3xxx_clkdm_wakeup(struct clockdomain *clkdm)
{
	omap3xxx_cm_clkdm_force_wakeup(clkdm->pwrdm.ptr->prcm_offs,
				       clkdm->clktrctrl_mask);
	return 0;
}

/* Enable hardware-supervised idle; add autodeps first if clkdm is in use */
static void omap3xxx_clkdm_allow_idle(struct clockdomain *clkdm)
{
	if (clkdm->usecount > 0)
		clkdm_add_autodeps(clkdm);

	omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
				       clkdm->clktrctrl_mask);
}

/* Disable hardware-supervised idle; drop autodeps after if clkdm is in use */
static void omap3xxx_clkdm_deny_idle(struct clockdomain *clkdm)
{
	omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					clkdm->clktrctrl_mask);

	if (clkdm->usecount > 0)
		clkdm_del_autodeps(clkdm);
}

/*
 * Called when the first clock in @clkdm is enabled: make sure the
 * clockdomain is awake and its autodeps are in place.
 */
static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
{
	bool hwsup = false;

	if (!clkdm->clktrctrl_mask)
		return 0;

	/*
	 * The CLKDM_MISSING_IDLE_REPORTING flag documentation has
	 * more details on the unpleasant problem this is working
	 * around
	 */
	if ((clkdm->flags & CLKDM_MISSING_IDLE_REPORTING) &&
	    (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)) {
		omap3xxx_clkdm_wakeup(clkdm);
		return 0;
	}

	hwsup = omap3xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					      clkdm->clktrctrl_mask);

	if (hwsup) {
		/* Disable HW transitions when we are changing deps */
		omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
						clkdm->clktrctrl_mask);
		clkdm_add_autodeps(clkdm);
		omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					       clkdm->clktrctrl_mask);
	} else {
		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
			omap3xxx_clkdm_wakeup(clkdm);
	}

	return 0;
}

/*
 * Called when the last clock in @clkdm is disabled: drop autodeps and,
 * where possible, let (or force) the clockdomain back to sleep.
 */
static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
{
	bool hwsup = false;

	if (!clkdm->clktrctrl_mask)
		return 0;

	/*
	 * The CLKDM_MISSING_IDLE_REPORTING flag documentation has
	 * more details on the unpleasant problem this is working
	 * around
	 */
	if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING &&
	    !(clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) {
		omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					       clkdm->clktrctrl_mask);
		return 0;
	}

	hwsup = omap3xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					      clkdm->clktrctrl_mask);

	if (hwsup) {
		/* Disable HW transitions when we are changing deps */
		omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
						clkdm->clktrctrl_mask);
		clkdm_del_autodeps(clkdm);
		omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
					       clkdm->clktrctrl_mask);
	} else {
		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
			omap3xxx_clkdm_sleep(clkdm);
	}

	return 0;
}

/* OMAP3 clockdomain operations vector, registered with the clockdomain core */
struct clkdm_ops omap3_clkdm_operations = {
	.clkdm_add_wkdep	= omap2_clkdm_add_wkdep,
	.clkdm_del_wkdep	= omap2_clkdm_del_wkdep,
	.clkdm_read_wkdep	= omap2_clkdm_read_wkdep,
	.clkdm_clear_all_wkdeps	= omap2_clkdm_clear_all_wkdeps,
	.clkdm_add_sleepdep	= omap3xxx_clkdm_add_sleepdep,
	.clkdm_del_sleepdep	= omap3xxx_clkdm_del_sleepdep,
	.clkdm_read_sleepdep	= omap3xxx_clkdm_read_sleepdep,
	.clkdm_clear_all_sleepdeps	= omap3xxx_clkdm_clear_all_sleepdeps,
	.clkdm_sleep		= omap3xxx_clkdm_sleep,
	.clkdm_wakeup		= omap3xxx_clkdm_wakeup,
	.clkdm_allow_idle	= omap3xxx_clkdm_allow_idle,
	.clkdm_deny_idle	= omap3xxx_clkdm_deny_idle,
	.clkdm_clk_enable	= omap3xxx_clkdm_clk_enable,
	.clkdm_clk_disable	= omap3xxx_clkdm_clk_disable,
};

/*
 * Context save/restore code - OMAP3 only
 */

/* Snapshot of all CM registers that must survive an off-mode transition */
struct omap3_cm_regs {
	u32 iva2_cm_clksel1;
	u32 iva2_cm_clksel2;
	u32 cm_sysconfig;
	u32 sgx_cm_clksel;
	u32 dss_cm_clksel;
	u32 cam_cm_clksel;
	u32 per_cm_clksel;
	u32 emu_cm_clksel;
	u32 emu_cm_clkstctrl;
	u32 pll_cm_autoidle;
	u32 pll_cm_autoidle2;
	u32 pll_cm_clksel4;
	u32 pll_cm_clksel5;
	u32 pll_cm_clken2;
	u32 cm_polctrl;
	u32 iva2_cm_fclken;
	u32 iva2_cm_clken_pll;
	u32 core_cm_fclken1;
	u32 core_cm_fclken3;
	u32 sgx_cm_fclken;
	u32 wkup_cm_fclken;
	u32 dss_cm_fclken;
	u32 cam_cm_fclken;
	u32 per_cm_fclken;
	u32 usbhost_cm_fclken;
	u32 core_cm_iclken1;
	u32 core_cm_iclken2;
	u32 core_cm_iclken3;
	u32 sgx_cm_iclken;
	u32 wkup_cm_iclken;
	u32 dss_cm_iclken;
	u32 cam_cm_iclken;
	u32 per_cm_iclken;
	u32 usbhost_cm_iclken;
	u32 iva2_cm_autoidle2;
	u32 mpu_cm_autoidle2;
	u32 iva2_cm_clkstctrl;
	u32 mpu_cm_clkstctrl;
	u32 core_cm_clkstctrl;
	u32 sgx_cm_clkstctrl;
	u32 dss_cm_clkstctrl;
	u32 cam_cm_clkstctrl;
	u32 per_cm_clkstctrl;
	u32 neon_cm_clkstctrl;
	u32 usbhost_cm_clkstctrl;
	u32 core_cm_autoidle1;
	u32 core_cm_autoidle2;
	u32 core_cm_autoidle3;
	u32 wkup_cm_autoidle;
	u32 dss_cm_autoidle;
	u32 cam_cm_autoidle;
	u32 per_cm_autoidle;
	u32 usbhost_cm_autoidle;
	u32 sgx_cm_sleepdep;
	u32 dss_cm_sleepdep;
	u32 cam_cm_sleepdep;
	u32 per_cm_sleepdep;
	u32 usbhost_cm_sleepdep;
	u32 cm_clkout_ctrl;
};

static struct omap3_cm_regs cm_context;

/* Save all CM registers listed in struct omap3_cm_regs into cm_context */
void omap3_cm_save_context(void)
{
	cm_context.iva2_cm_clksel1 =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL1);
	cm_context.iva2_cm_clksel2 =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL2);
	cm_context.cm_sysconfig =
		omap2_cm_read_mod_reg(OCP_MOD, OMAP3430_CM_SYSCONFIG);
	cm_context.sgx_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL);
	cm_context.dss_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL);
	cm_context.cam_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSEL);
	cm_context.per_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSEL);
	cm_context.emu_cm_clksel =
		omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1);
	cm_context.emu_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL);
	/*
	 * As per erratum i671, ROM code does not respect the PER DPLL
	 * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
	 * In this case, even though this register has been saved in
	 * scratchpad contents, we need to restore AUTO_PERIPH_DPLL
	 * by ourselves. So, we need to save it anyway.
	 */
	cm_context.pll_cm_autoidle =
		omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
	cm_context.pll_cm_autoidle2 =
		omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2);
	cm_context.pll_cm_clksel4 =
		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4);
	cm_context.pll_cm_clksel5 =
		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5);
	cm_context.pll_cm_clken2 =
		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2);
	cm_context.cm_polctrl =
		omap2_cm_read_mod_reg(OCP_MOD, OMAP3430_CM_POLCTRL);
	cm_context.iva2_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_FCLKEN);
	cm_context.iva2_cm_clken_pll =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
	cm_context.core_cm_fclken1 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
	cm_context.core_cm_fclken3 =
		omap2_cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
	cm_context.sgx_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_FCLKEN);
	cm_context.wkup_cm_fclken =
		omap2_cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
	cm_context.dss_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_FCLKEN);
	cm_context.cam_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_FCLKEN);
	cm_context.per_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
	cm_context.usbhost_cm_fclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
	cm_context.core_cm_iclken1 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
	cm_context.core_cm_iclken2 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN2);
	cm_context.core_cm_iclken3 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
	cm_context.sgx_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_ICLKEN);
	cm_context.wkup_cm_iclken =
		omap2_cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
	cm_context.dss_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_ICLKEN);
	cm_context.cam_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_ICLKEN);
	cm_context.per_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
	cm_context.usbhost_cm_iclken =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
	cm_context.iva2_cm_autoidle2 =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
	cm_context.mpu_cm_autoidle2 =
		omap2_cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2);
	cm_context.iva2_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.mpu_cm_clkstctrl =
		omap2_cm_read_mod_reg(MPU_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.core_cm_clkstctrl =
		omap2_cm_read_mod_reg(CORE_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.sgx_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.dss_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.cam_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.per_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.neon_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430_NEON_MOD, OMAP2_CM_CLKSTCTRL);
	cm_context.usbhost_cm_clkstctrl =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
				      OMAP2_CM_CLKSTCTRL);
	cm_context.core_cm_autoidle1 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE1);
	cm_context.core_cm_autoidle2 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE2);
	cm_context.core_cm_autoidle3 =
		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE3);
	cm_context.wkup_cm_autoidle =
		omap2_cm_read_mod_reg(WKUP_MOD, CM_AUTOIDLE);
	cm_context.dss_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_AUTOIDLE);
	cm_context.cam_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_AUTOIDLE);
	cm_context.per_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
	cm_context.usbhost_cm_autoidle =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
	cm_context.sgx_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD,
				      OMAP3430_CM_SLEEPDEP);
	cm_context.dss_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP3430_CM_SLEEPDEP);
	cm_context.cam_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP3430_CM_SLEEPDEP);
	cm_context.per_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_CM_SLEEPDEP);
	cm_context.usbhost_cm_sleepdep =
		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
				      OMAP3430_CM_SLEEPDEP);
	cm_context.cm_clkout_ctrl =
		omap2_cm_read_mod_reg(OMAP3430_CCR_MOD,
				      OMAP3_CM_CLKOUT_CTRL_OFFSET);
}

/* Write back every register saved by omap3_cm_save_context(), same order */
void omap3_cm_restore_context(void)
{
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel1, OMAP3430_IVA2_MOD,
			       CM_CLKSEL1);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel2, OMAP3430_IVA2_MOD,
			       CM_CLKSEL2);
	omap2_cm_write_mod_reg(cm_context.cm_sysconfig, OCP_MOD,
			       OMAP3430_CM_SYSCONFIG);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.per_cm_clksel, OMAP3430_PER_MOD,
			       CM_CLKSEL);
	omap2_cm_write_mod_reg(cm_context.emu_cm_clksel, OMAP3430_EMU_MOD,
			       CM_CLKSEL1);
	omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD,
			       OMAP2_CM_CLKSTCTRL);
	/*
	 * As per erratum i671, ROM code does not respect the PER DPLL
	 * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
	 * In this case, we need to restore AUTO_PERIPH_DPLL by ourselves.
	 */
	omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD,
			       OMAP3430ES2_CM_CLKSEL4);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel5, PLL_MOD,
			       OMAP3430ES2_CM_CLKSEL5);
	omap2_cm_write_mod_reg(cm_context.pll_cm_clken2, PLL_MOD,
			       OMAP3430ES2_CM_CLKEN2);
	omap2_cm_write_mod_reg(cm_context.cm_polctrl, OCP_MOD,
			       OMAP3430_CM_POLCTRL);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_fclken, OMAP3430_IVA2_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clken_pll, OMAP3430_IVA2_MOD,
			       OMAP3430_CM_CLKEN_PLL);
	omap2_cm_write_mod_reg(cm_context.core_cm_fclken1, CORE_MOD,
			       CM_FCLKEN1);
	omap2_cm_write_mod_reg(cm_context.core_cm_fclken3, CORE_MOD,
			       OMAP3430ES2_CM_FCLKEN3);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_fclken, OMAP3430ES2_SGX_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_fclken, WKUP_MOD, CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.dss_cm_fclken, OMAP3430_DSS_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.cam_cm_fclken, OMAP3430_CAM_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.per_cm_fclken, OMAP3430_PER_MOD,
			       CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_fclken,
			       OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken1, CORE_MOD,
			       CM_ICLKEN1);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken2, CORE_MOD,
			       CM_ICLKEN2);
	omap2_cm_write_mod_reg(cm_context.core_cm_iclken3, CORE_MOD,
			       CM_ICLKEN3);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_iclken, OMAP3430ES2_SGX_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_iclken, WKUP_MOD, CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.dss_cm_iclken, OMAP3430_DSS_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.cam_cm_iclken, OMAP3430_CAM_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.per_cm_iclken, OMAP3430_PER_MOD,
			       CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_iclken,
			       OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_autoidle2, OMAP3430_IVA2_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.mpu_cm_autoidle2, MPU_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.mpu_cm_clkstctrl, MPU_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.core_cm_clkstctrl, CORE_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_clkstctrl,
			       OMAP3430ES2_SGX_MOD, OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.dss_cm_clkstctrl, OMAP3430_DSS_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.cam_cm_clkstctrl, OMAP3430_CAM_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.per_cm_clkstctrl, OMAP3430_PER_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.neon_cm_clkstctrl, OMAP3430_NEON_MOD,
			       OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_clkstctrl,
			       OMAP3430ES2_USBHOST_MOD, OMAP2_CM_CLKSTCTRL);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle1, CORE_MOD,
			       CM_AUTOIDLE1);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle2, CORE_MOD,
			       CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle3, CORE_MOD,
			       CM_AUTOIDLE3);
	omap2_cm_write_mod_reg(cm_context.wkup_cm_autoidle, WKUP_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.dss_cm_autoidle, OMAP3430_DSS_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.cam_cm_autoidle, OMAP3430_CAM_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.per_cm_autoidle, OMAP3430_PER_MOD,
			       CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_autoidle,
			       OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
	omap2_cm_write_mod_reg(cm_context.sgx_cm_sleepdep, OMAP3430ES2_SGX_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.dss_cm_sleepdep, OMAP3430_DSS_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.cam_cm_sleepdep, OMAP3430_CAM_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.per_cm_sleepdep, OMAP3430_PER_MOD,
			       OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.usbhost_cm_sleepdep,
			       OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP);
	omap2_cm_write_mod_reg(cm_context.cm_clkout_ctrl, OMAP3430_CCR_MOD,
			       OMAP3_CM_CLKOUT_CTRL_OFFSET);
}

/*
 * Save the CM registers needed by the off-mode wakeup ROM code into the
 * scratchpad buffer at @ptr; the write order here must match what the
 * ROM code expects to read back.
 */
void omap3_cm_save_scratchpad_contents(u32 *ptr)
{
	*ptr++ = omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL);
	*ptr++ = omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
	*ptr++ = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);

	/*
	 * As per erratum i671, ROM code does not respect the PER DPLL
	 * programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1.
	 * Then, in any case, clear these bits to avoid extra latencies.
	 */
	*ptr++ = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) &
		 ~OMAP3430_AUTO_PERIPH_DPLL_MASK;
	*ptr++ = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
	*ptr++ = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL);
	*ptr++ = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3);
	*ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL);
	*ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL);
	*ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL);
	*ptr++ = omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL);
}

/*
 * CM driver registration
 */

/* Low-level CM callbacks registered with the common CM code */
static const struct cm_ll_data omap3xxx_cm_ll_data = {
	.split_idlest_reg	= &omap3xxx_cm_split_idlest_reg,
	.wait_module_ready	= &omap3xxx_cm_wait_module_ready,
};

/* Early init: set up the legacy clock provider and register the CM ops */
int __init omap3xxx_cm_init(const struct omap_prcm_init_data *data)
{
	omap2_clk_legacy_provider_init(TI_CLKM_CM, cm_base.va +
				       OMAP3430_IVA2_MOD);
	return cm_register(&omap3xxx_cm_ll_data);
}

/* Module exit: unregister the CM ops */
static void __exit omap3xxx_cm_exit(void)
{
	cm_unregister(&omap3xxx_cm_ll_data);
}
__exitcall(omap3xxx_cm_exit);
/* source: linux-master arch/arm/mach-omap2/cm3xxx.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AM33XX PRM functions
 *
 * Copyright (C) 2011-2012 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>

#include "powerdomain.h"
#include "prm33xx.h"
#include "prm-regbits-33xx.h"

#define AM33XX_PRM_RSTCTRL_OFFSET		0x0000

/* Bit 0 of PRM_RSTCTRL triggers a global warm software reset */
#define AM33XX_RST_GLOBAL_WARM_SW_MASK		(1 << 0)

/* Read a register in a PRM instance */
static u32 am33xx_prm_read_reg(s16 inst, u16 idx)
{
	return readl_relaxed(prm_base.va + inst + idx);
}

/* Write into a register in a PRM instance */
static void am33xx_prm_write_reg(u32 val, s16 inst, u16 idx)
{
	writel_relaxed(val, prm_base.va + inst + idx);
}

/* Read-modify-write a register in PRM. Caller must lock */
static u32 am33xx_prm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx)
{
	u32 v;

	v = am33xx_prm_read_reg(inst, idx);
	v &= ~mask;
	v |= bits;
	am33xx_prm_write_reg(v, inst, idx);

	return v;
}

/**
 * am33xx_prm_is_hardreset_asserted - read the HW reset line state of
 * submodules contained in the hwmod module
 * @shift: register bit shift corresponding to the reset line to check
 * @part: PRM partition, ignored for AM33xx
 * @inst: CM instance register offset (*_INST macro)
 * @rstctrl_offs: RM_RSTCTRL register address offset for this module
 *
 * Returns 1 if the (sub)module hardreset line is currently asserted,
 * 0 if the (sub)module hardreset line is not currently asserted, or
 * -EINVAL upon parameter error.
 */
static int am33xx_prm_is_hardreset_asserted(u8 shift, u8 part, s16 inst,
					    u16 rstctrl_offs)
{
	u32 v;

	v = am33xx_prm_read_reg(inst, rstctrl_offs);
	v &= 1 << shift;
	v >>= shift;

	return v;
}

/**
 * am33xx_prm_assert_hardreset - assert the HW reset line of a submodule
 * @shift: register bit shift corresponding to the reset line to assert
 * @part: CM partition, ignored for AM33xx
 * @inst: CM instance register offset (*_INST macro)
 * @rstctrl_reg: RM_RSTCTRL register address for this module
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * place the submodule into reset.  Returns 0 upon success or -EINVAL
 * upon an argument error.
 */
static int am33xx_prm_assert_hardreset(u8 shift, u8 part, s16 inst,
				       u16 rstctrl_offs)
{
	u32 mask = 1 << shift;

	am33xx_prm_rmw_reg_bits(mask, mask, inst, rstctrl_offs);

	return 0;
}

/**
 * am33xx_prm_deassert_hardreset - deassert a submodule hardreset line and
 * wait
 * @shift: register bit shift corresponding to the reset line to deassert
 * @st_shift: reset status register bit shift corresponding to the reset line
 * @part: PRM partition, not used for AM33xx
 * @inst: CM instance register offset (*_INST macro)
 * @rstctrl_reg: RM_RSTCTRL register address for this module
 * @rstst_reg: RM_RSTST register address for this module
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * take the submodule out of reset and wait until the PRCM indicates
 * that the reset has completed before returning.  Returns 0 upon success or
 * -EINVAL upon an argument error, -EEXIST if the submodule was already out
 * of reset, or -EBUSY if the submodule did not exit reset promptly.
 */
static int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, u8 part,
					 s16 inst, u16 rstctrl_offs,
					 u16 rstst_offs)
{
	int c;
	u32 mask = 1 << st_shift;

	/* Check the current status to avoid de-asserting the line twice */
	if (am33xx_prm_is_hardreset_asserted(shift, 0, inst, rstctrl_offs) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	am33xx_prm_rmw_reg_bits(0xffffffff, mask, inst, rstst_offs);

	/* de-assert the reset control line */
	mask = 1 << shift;
	am33xx_prm_rmw_reg_bits(mask, 0, inst, rstctrl_offs);

	/* wait the status to be set */
	omap_test_timeout(am33xx_prm_is_hardreset_asserted(st_shift, 0, inst,
							   rstst_offs),
			  MAX_MODULE_HARDRESET_WAIT, c);

	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}

/* Program the next (target) power state of @pwrdm */
static int am33xx_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
{
	am33xx_prm_rmw_reg_bits(OMAP_POWERSTATE_MASK,
				(pwrst << OMAP_POWERSTATE_SHIFT),
				pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
	return 0;
}

/* Return the programmed next power state of @pwrdm */
static int am33xx_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
{
	u32 v;

	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
	v &= OMAP_POWERSTATE_MASK;
	v >>= OMAP_POWERSTATE_SHIFT;

	return v;
}

/* Return the current power state of @pwrdm from its status register */
static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
{
	u32 v;

	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
	v &= OMAP_POWERSTATEST_MASK;
	v >>= OMAP_POWERSTATEST_SHIFT;

	return v;
}

/* Request a low-power state change without a functional transition */
static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
{
	am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
				(1 << AM33XX_LOWPOWERSTATECHANGE_SHIFT),
				pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
	return 0;
}

/* Clear the "last power state entered" bits (write-1-to-clear) */
static int am33xx_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
{
	am33xx_prm_rmw_reg_bits(AM33XX_LASTPOWERSTATEENTERED_MASK,
				AM33XX_LASTPOWERSTATEENTERED_MASK,
				pwrdm->prcm_offs, pwrdm->pwrstst_offs);
	return 0;
}

/* Program the logic retention state; -EINVAL if @pwrdm has no such field */
static int am33xx_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
{
	u32 m;

	m = pwrdm->logicretstate_mask;
	if (!m)
		return -EINVAL;

	am33xx_prm_rmw_reg_bits(m, (pwrst << __ffs(m)),
				pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);

	return 0;
}

/* Return the current logic power state of @pwrdm */
static int am33xx_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
{
	u32 v;

	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
	v &= AM33XX_LOGICSTATEST_MASK;
	v >>= AM33XX_LOGICSTATEST_SHIFT;

	return v;
}

/* Return the programmed logic retention state; -EINVAL if unsupported */
static int am33xx_pwrdm_read_logic_retst(struct powerdomain *pwrdm)
{
	u32 v, m;

	m = pwrdm->logicretstate_mask;
	if (!m)
		return -EINVAL;

	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
	v &= m;
	v >>= __ffs(m);

	return v;
}

/* Program memory bank @bank's on-state; -EINVAL if the bank has no field */
static int am33xx_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank,
				     u8 pwrst)
{
	u32 m;

	m = pwrdm->mem_on_mask[bank];
	if (!m)
		return -EINVAL;

	am33xx_prm_rmw_reg_bits(m, (pwrst << __ffs(m)),
				pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);

	return 0;
}

/* Program memory bank @bank's retention state; -EINVAL if unsupported */
static int am33xx_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank,
				      u8 pwrst)
{
	u32 m;

	m = pwrdm->mem_ret_mask[bank];
	if (!m)
		return -EINVAL;

	am33xx_prm_rmw_reg_bits(m, (pwrst << __ffs(m)),
				pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);

	return 0;
}

/* Return the current power state of memory bank @bank */
static int am33xx_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
{
	u32 m, v;

	m = pwrdm->mem_pwrst_mask[bank];
	if (!m)
		return -EINVAL;

	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
	v &= m;
	v >>= __ffs(m);

	return v;
}

/* Return the programmed retention state of memory bank @bank */
static int am33xx_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
{
	u32 m, v;

	m = pwrdm->mem_retst_mask[bank];
	if (!m)
		return -EINVAL;

	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
	v &= m;
	v >>= __ffs(m);

	return v;
}

/*
 * Busy-wait (bounded) for @pwrdm's in-transition bit to clear.
 * Returns 0 on completion or -EAGAIN on bail-out.
 */
static int am33xx_pwrdm_wait_transition(struct powerdomain *pwrdm)
{
	u32 c = 0;

	/*
	 * REVISIT: pwrdm_wait_transition() may be better implemented
	 * via a callback and a periodic timer check -- how long do we expect
	 * powerdomain transitions to take?
	 */

	/* XXX Is this udelay() value meaningful? */
	while ((am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs)
		& OMAP_INTRANSITION_MASK) &&
	       (c++ < PWRDM_TRANSITION_BAILOUT))
		udelay(1);

	if (c > PWRDM_TRANSITION_BAILOUT) {
		pr_err("powerdomain: %s: waited too long to complete transition\n",
		       pwrdm->name);
		return -EAGAIN;
	}

	pr_debug("powerdomain: completed transition in %d loops\n", c);

	return 0;
}

static int am33xx_check_vcvp(void)
{
	/* No VC/VP on am33xx devices */
	return 0;
}

/**
 * am33xx_prm_global_warm_sw_reset - reboot the device via warm reset
 *
 * Immediately reboots the device through warm reset.
 */
static void am33xx_prm_global_warm_sw_reset(void)
{
	am33xx_prm_rmw_reg_bits(AM33XX_RST_GLOBAL_WARM_SW_MASK,
				AM33XX_RST_GLOBAL_WARM_SW_MASK,
				AM33XX_PRM_DEVICE_MOD,
				AM33XX_PRM_RSTCTRL_OFFSET);

	/* OCP barrier */
	(void)am33xx_prm_read_reg(AM33XX_PRM_DEVICE_MOD,
				  AM33XX_PRM_RSTCTRL_OFFSET);
}

/* Save @pwrdm's PWRSTCTRL register for restore after a deep-sleep cycle */
static void am33xx_pwrdm_save_context(struct powerdomain *pwrdm)
{
	pwrdm->context = am33xx_prm_read_reg(pwrdm->prcm_offs,
					     pwrdm->pwrstctrl_offs);
	/*
	 * Do not save LOWPOWERSTATECHANGE, writing a 1 indicates a request,
	 * reading back a 1 indicates a request in progress.
	 */
	pwrdm->context &= ~AM33XX_LOWPOWERSTATECHANGE_MASK;
}

/* Restore @pwrdm's PWRSTCTRL and wait out any resulting transition */
static void am33xx_pwrdm_restore_context(struct powerdomain *pwrdm)
{
	int st, ctrl;

	st = am33xx_prm_read_reg(pwrdm->prcm_offs,
				 pwrdm->pwrstst_offs);

	am33xx_prm_write_reg(pwrdm->context, pwrdm->prcm_offs,
			     pwrdm->pwrstctrl_offs);

	/* Make sure we only wait for a transition if there is one */
	st &= OMAP_POWERSTATEST_MASK;
	ctrl = OMAP_POWERSTATEST_MASK & pwrdm->context;

	if (st != ctrl)
		am33xx_pwrdm_wait_transition(pwrdm);
}

/* AM33xx powerdomain operations vector, registered with the pwrdm core */
struct pwrdm_ops am33xx_pwrdm_operations = {
	.pwrdm_set_next_pwrst		= am33xx_pwrdm_set_next_pwrst,
	.pwrdm_read_next_pwrst		= am33xx_pwrdm_read_next_pwrst,
	.pwrdm_read_pwrst		= am33xx_pwrdm_read_pwrst,
	.pwrdm_set_logic_retst		= am33xx_pwrdm_set_logic_retst,
	.pwrdm_read_logic_pwrst		= am33xx_pwrdm_read_logic_pwrst,
	.pwrdm_read_logic_retst		= am33xx_pwrdm_read_logic_retst,
	.pwrdm_clear_all_prev_pwrst	= am33xx_pwrdm_clear_all_prev_pwrst,
	.pwrdm_set_lowpwrstchange	= am33xx_pwrdm_set_lowpwrstchange,
	.pwrdm_read_mem_pwrst		= am33xx_pwrdm_read_mem_pwrst,
	.pwrdm_read_mem_retst		= am33xx_pwrdm_read_mem_retst,
	.pwrdm_set_mem_onst		= am33xx_pwrdm_set_mem_onst,
	.pwrdm_set_mem_retst		= am33xx_pwrdm_set_mem_retst,
	.pwrdm_wait_transition		= am33xx_pwrdm_wait_transition,
	.pwrdm_has_voltdm		= am33xx_check_vcvp,
	.pwrdm_save_context		= am33xx_pwrdm_save_context,
	.pwrdm_restore_context		= am33xx_pwrdm_restore_context,
};

/* Low-level PRM callbacks registered with the common PRM code */
static struct prm_ll_data am33xx_prm_ll_data = {
	.assert_hardreset	= am33xx_prm_assert_hardreset,
	.deassert_hardreset	= am33xx_prm_deassert_hardreset,
	.is_hardreset_asserted	= am33xx_prm_is_hardreset_asserted,
	.reset_system		= am33xx_prm_global_warm_sw_reset,
};

/* Early init: register the AM33xx PRM operations */
int __init am33xx_prm_init(const struct omap_prcm_init_data *data)
{
	return prm_register(&am33xx_prm_ll_data);
}

/* Module exit: unregister the AM33xx PRM operations */
static void __exit am33xx_prm_exit(void)
{
	prm_unregister(&am33xx_prm_ll_data);
}
__exitcall(am33xx_prm_exit);
/* source: linux-master arch/arm/mach-omap2/prm33xx.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-omap2/id.c
 *
 * OMAP2 CPU identification code
 *
 * Copyright (C) 2005 Nokia Corporation
 * Written by Tony Lindgren <[email protected]>
 *
 * Copyright (C) 2009-11 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <[email protected]>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/random.h>
#include <linux/slab.h>

#ifdef CONFIG_SOC_BUS
#include <linux/sys_soc.h>
#endif

#include <asm/cputype.h>

#include "common.h"
#include "id.h"
#include "soc.h"
#include "control.h"

#define OMAP4_SILICON_TYPE_STANDARD	0x01
#define OMAP4_SILICON_TYPE_PERFORMANCE	0x02

#define OMAP_SOC_MAX_NAME_LENGTH	16

/* Cached revision word: class set early by omap2_set_globals_tap(),
 * refined later by the *_check_revision() functions below. */
static unsigned int omap_revision;
static char soc_name[OMAP_SOC_MAX_NAME_LENGTH];
static char soc_rev[OMAP_SOC_MAX_NAME_LENGTH];
u32 omap_features;

/* Return the cached OMAP revision word (see omap_revision above). */
unsigned int omap_rev(void)
{
	return omap_revision;
}
EXPORT_SYMBOL(omap_rev);

/*
 * Read and cache the device type (GP/EMU/HS/...) from the per-SoC
 * control-module status register.  The static 'val' starts at the
 * all-ones mask as a "not read yet" sentinel; any decoded value is
 * strictly smaller, so subsequent calls return the cached result.
 */
int omap_type(void)
{
	static u32 val = OMAP2_DEVICETYPE_MASK;

	if (val < OMAP2_DEVICETYPE_MASK)
		return val;

	if (soc_is_omap24xx()) {
		val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
	} else if (soc_is_ti81xx()) {
		val = omap_ctrl_readl(TI81XX_CONTROL_STATUS);
	} else if (soc_is_am33xx() || soc_is_am43xx()) {
		val = omap_ctrl_readl(AM33XX_CONTROL_STATUS);
	} else if (soc_is_omap34xx()) {
		val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
	} else if (soc_is_omap44xx()) {
		val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		/* OMAP5/DRA7 use a different field position (bits 7:6) */
		val = omap_ctrl_readl(OMAP5XXX_CONTROL_STATUS);
		val &= OMAP5_DEVICETYPE_MASK;
		val >>= 6;
		goto out;
	} else {
		pr_err("Cannot detect omap type!\n");
		goto out;
	}

	val &= OMAP2_DEVICETYPE_MASK;
	val >>= 8;

out:
	return val;
}
EXPORT_SYMBOL(omap_type);

/*----------------------------------------------------------------------------*/

/* Offsets into the TAP (Test Access Port) register block */
#define OMAP_TAP_IDCODE		0x0204
#define OMAP_TAP_DIE_ID_0	0x0218
#define OMAP_TAP_DIE_ID_1	0x021C
#define OMAP_TAP_DIE_ID_2	0x0220
#define OMAP_TAP_DIE_ID_3	0x0224

/* OMAP4 and later place the die ID at different offsets */
#define OMAP_TAP_DIE_ID_44XX_0	0x0200
#define OMAP_TAP_DIE_ID_44XX_1	0x0208
#define OMAP_TAP_DIE_ID_44XX_2	0x020c
#define OMAP_TAP_DIE_ID_44XX_3	0x0210

#define read_tap_reg(reg)	readl_relaxed(tap_base + (reg))

struct omap_id {
	u16	hawkeye;	/* Silicon type (Hawkeye id) */
	u8	dev;		/* Device type from production_id reg */
	u32	type;		/* Combined type id copied to omap_revision */
};

/* Register values to detect the OMAP version */
static struct omap_id omap_ids[] __initdata = {
	{ .hawkeye = 0xb5d9, .dev = 0x0, .type = 0x24200024 },
	{ .hawkeye = 0xb5d9, .dev = 0x1, .type = 0x24201024 },
	{ .hawkeye = 0xb5d9, .dev = 0x2, .type = 0x24202024 },
	{ .hawkeye = 0xb5d9, .dev = 0x4, .type = 0x24220024 },
	{ .hawkeye = 0xb5d9, .dev = 0x8, .type = 0x24230024 },
	{ .hawkeye = 0xb68a, .dev = 0x0, .type = 0x24300024 },
};

/* Mapped base of the TAP block; set by omap2_set_globals_tap() */
static void __iomem *tap_base;
static u16 tap_prod_id;

/* Fill @odi with the four 32-bit die-ID words from the TAP block. */
static void omap_get_die_id(struct omap_die_id *odi)
{
	if (soc_is_omap44xx() || soc_is_omap54xx() || soc_is_dra7xx()) {
		odi->id_0 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_0);
		odi->id_1 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_1);
		odi->id_2 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_2);
		odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_3);

		return;
	}

	odi->id_0 = read_tap_reg(OMAP_TAP_DIE_ID_0);
	odi->id_1 = read_tap_reg(OMAP_TAP_DIE_ID_1);
	odi->id_2 = read_tap_reg(OMAP_TAP_DIE_ID_2);
	odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_3);
}

static int __init omap_feed_randpool(void)
{
	struct omap_die_id odi;

	/* Throw the die ID into the entropy pool at boot */
	omap_get_die_id(&odi);
	add_device_randomness(&odi, sizeof(odi));

	return 0;
}
omap_device_initcall(omap_feed_randpool);

/*
 * Identify OMAP24xx silicon from the TAP IDCODE/production-ID registers.
 * NOTE(review): the omap_ids[] lookup (indices i/j) only validates and
 * logs — its result is never written back to omap_revision here, so the
 * name printed below comes from the class value installed earlier by
 * omap2_set_globals_tap().
 */
void __init omap2xxx_check_revision(void)
{
	int i, j;
	u32 idcode, prod_id;
	u16 hawkeye;
	u8 dev_type, rev;
	struct omap_die_id odi;

	idcode = read_tap_reg(OMAP_TAP_IDCODE);
	prod_id = read_tap_reg(tap_prod_id);
	hawkeye = (idcode >> 12) & 0xffff;
	rev = (idcode >> 28) & 0x0f;
	dev_type = (prod_id >> 16) & 0x0f;
	omap_get_die_id(&odi);

	pr_debug("OMAP_TAP_IDCODE 0x%08x REV %i HAWKEYE 0x%04x MANF %03x\n",
		 idcode, rev, hawkeye, (idcode >> 1) & 0x7ff);
	pr_debug("OMAP_TAP_DIE_ID_0: 0x%08x\n", odi.id_0);
	pr_debug("OMAP_TAP_DIE_ID_1: 0x%08x DEV_REV: %i\n",
		 odi.id_1, (odi.id_1 >> 28) & 0xf);
	pr_debug("OMAP_TAP_DIE_ID_2: 0x%08x\n", odi.id_2);
	pr_debug("OMAP_TAP_DIE_ID_3: 0x%08x\n", odi.id_3);
	pr_debug("OMAP_TAP_PROD_ID_0: 0x%08x DEV_TYPE: %i\n",
		 prod_id, dev_type);

	/* Check hawkeye ids */
	for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
		if (hawkeye == omap_ids[i].hawkeye)
			break;
	}

	if (i == ARRAY_SIZE(omap_ids)) {
		printk(KERN_ERR "Unknown OMAP CPU id\n");
		return;
	}

	for (j = i; j < ARRAY_SIZE(omap_ids); j++) {
		if (dev_type == omap_ids[j].dev)
			break;
	}

	if (j == ARRAY_SIZE(omap_ids)) {
		pr_err("Unknown OMAP device type. Handling it as OMAP%04x\n",
		       omap_ids[i].type >> 16);
		j = i;
	}

	sprintf(soc_name, "OMAP%04x", omap_rev() >> 16);
	sprintf(soc_rev, "ES%x", (omap_rev() >> 12) & 0xf);

	pr_info("%s", soc_name);
	if ((omap_rev() >> 8) & 0x0f)
		pr_cont("%s", soc_rev);
	pr_cont("\n");
}

/* Append " feat" to buf when the corresponding omap3_has_*() is set. */
#define OMAP3_SHOW_FEATURE(feat)		\
	if (omap3_has_ ##feat())		\
		n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");

/*
 * Derive the marketing name from the detected SoC class plus the
 * feature bits gathered by the *_check_features() functions, store it
 * in soc_name, and log a one-line summary with the feature list.
 */
static void __init omap3_cpuinfo(void)
{
	const char *cpu_name;
	char buf[64];
	int n = 0;

	memset(buf, 0, sizeof(buf));

	/*
	 * OMAP3430 and OMAP3530 are assumed to be same.
	 *
	 * OMAP3525, OMAP3515 and OMAP3503 can be detected only based
	 * on available features. Upon detection, update the CPU id
	 * and CPU class bits.
	 */
	if (soc_is_omap3630()) {
		if (omap3_has_iva() && omap3_has_sgx()) {
			cpu_name = (omap3_has_isp()) ? "OMAP3630/DM3730" :
				"OMAP3621";
		} else if (omap3_has_iva()) {
			cpu_name = "DM3725";
		} else if (omap3_has_sgx()) {
			cpu_name = "OMAP3615/AM3715";
		} else {
			cpu_name = (omap3_has_isp()) ? "AM3703" : "OMAP3611";
		}
	} else if (soc_is_am35xx()) {
		cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
	} else if (soc_is_ti816x()) {
		cpu_name = "TI816X";
	} else if (soc_is_am335x()) {
		cpu_name = "AM335X";
	} else if (soc_is_am437x()) {
		cpu_name = "AM437x";
	} else if (soc_is_ti814x()) {
		cpu_name = "TI814X";
	} else if (omap3_has_iva() && omap3_has_sgx()) {
		/* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
		cpu_name = "OMAP3430/3530";
	} else if (omap3_has_iva()) {
		cpu_name = "OMAP3525";
	} else if (omap3_has_sgx()) {
		cpu_name = "OMAP3515";
	} else {
		cpu_name = "OMAP3503";
	}

	scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);

	/* Print verbose information */
	n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);

	OMAP3_SHOW_FEATURE(l2cache);
	OMAP3_SHOW_FEATURE(iva);
	OMAP3_SHOW_FEATURE(sgx);
	OMAP3_SHOW_FEATURE(neon);
	OMAP3_SHOW_FEATURE(isp);
	OMAP3_SHOW_FEATURE(192mhz_clk);
	/* drop the trailing space left by the last OMAP3_SHOW_FEATURE() */
	if (*(buf + n - 1) == ' ')
		n--;

	n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
	pr_info("%s", buf);
}

/* Set OMAP3_HAS_<feat> when the status field is not FEAT_<feat>_NONE. */
#define OMAP3_CHECK_FEATURE(status,feat)				\
	if (((status & OMAP3_ ##feat## _MASK) 				\
		>> OMAP3_ ##feat## _SHIFT) != FEAT_ ##feat## _NONE) { 	\
		omap_features |= OMAP3_HAS_ ##feat;			\
	}

/*
 * Populate omap_features for OMAP3-class parts from the control-module
 * status register plus revision-specific fixups, then log via
 * omap3_cpuinfo().
 */
void __init omap3xxx_check_features(void)
{
	u32 status;

	omap_features = 0;

	status = omap_ctrl_readl(OMAP3_CONTROL_OMAP_STATUS);

	OMAP3_CHECK_FEATURE(status, L2CACHE);
	OMAP3_CHECK_FEATURE(status, IVA);
	OMAP3_CHECK_FEATURE(status, SGX);
	OMAP3_CHECK_FEATURE(status, NEON);
	OMAP3_CHECK_FEATURE(status, ISP);
	if (soc_is_omap3630())
		omap_features |= OMAP3_HAS_192MHZ_CLK;
	if (soc_is_omap3430() || soc_is_omap3630())
		omap_features |= OMAP3_HAS_IO_WAKEUP;
	if (soc_is_omap3630() || omap_rev() == OMAP3430_REV_ES3_1 ||
	    omap_rev() == OMAP3430_REV_ES3_1_2)
		omap_features |= OMAP3_HAS_IO_CHAIN_CTRL;

	omap_features |= OMAP3_HAS_SDRC;

	/*
	 * am35x fixups:
	 * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as
	 *   reserved and therefore return 0 when read.  Unfortunately,
	 *   OMAP3_CHECK_FEATURE() will interpret some of those zeroes to
	 *   mean that a feature is present even though it isn't so clear
	 *   the incorrectly set feature bits.
	 */
	if (soc_is_am35xx())
		omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP);

	/*
	 * TODO: Get additional info (where applicable)
	 *	 e.g. Size of L2 cache.
	 */

	omap3_cpuinfo();
}

/* Detect standard vs performance silicon from the fuse register. */
void __init omap4xxx_check_features(void)
{
	u32 si_type;

	si_type =
	(read_tap_reg(OMAP4_CTRL_MODULE_CORE_STD_FUSE_PROD_ID_1) >> 16) & 0x03;

	if (si_type == OMAP4_SILICON_TYPE_PERFORMANCE)
		omap_features = OMAP4_HAS_PERF_SILICON;
}

void __init ti81xx_check_features(void)
{
	omap_features = OMAP3_HAS_NEON;
	omap3_cpuinfo();
}

/* AM33xx: NEON always present; SGX presence read from DEV_FEATURE fuse. */
void __init am33xx_check_features(void)
{
	u32 status;

	omap_features = OMAP3_HAS_NEON;
	status = omap_ctrl_readl(AM33XX_DEV_FEATURE);
	if (status & AM33XX_SGX_MASK)
		omap_features |= OMAP3_HAS_SGX;

	omap3_cpuinfo();
}

/*
 * Decode the TAP IDCODE into an omap_revision value for OMAP3-class
 * SoCs and format soc_rev ("ES<x>").
 * NOTE(review): the 3430 ES1.0 early-return path sets cpu_rev but
 * returns before the sprintf(), so soc_rev stays empty in that case.
 */
void __init omap3xxx_check_revision(void)
{
	const char *cpu_rev;
	u32 cpuid, idcode;
	u16 hawkeye;
	u8 rev;

	/*
	 * We cannot access revision registers on ES1.0.
	 * If the processor type is Cortex-A8 and the revision is 0x0
	 * it means its Cortex r0p0 which is 3430 ES1.0.
	 */
	cpuid = read_cpuid_id();
	if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
		omap_revision = OMAP3430_REV_ES1_0;
		cpu_rev = "1.0";
		return;
	}

	/*
	 * Detection for 34xx ES2.0 and above can be done with just
	 * hawkeye and rev. See TRM 1.5.2 Device Identification.
	 * Note that rev does not map directly to our defined processor
	 * revision numbers as ES1.0 uses value 0.
	 */
	idcode = read_tap_reg(OMAP_TAP_IDCODE);
	hawkeye = (idcode >> 12) & 0xffff;
	rev = (idcode >> 28) & 0xff;

	switch (hawkeye) {
	case 0xb7ae:
		/* Handle 34xx/35xx devices */
		switch (rev) {
		case 0: /* Take care of early samples */
		case 1:
			omap_revision = OMAP3430_REV_ES2_0;
			cpu_rev = "2.0";
			break;
		case 2:
			omap_revision = OMAP3430_REV_ES2_1;
			cpu_rev = "2.1";
			break;
		case 3:
			omap_revision = OMAP3430_REV_ES3_0;
			cpu_rev = "3.0";
			break;
		case 4:
			omap_revision = OMAP3430_REV_ES3_1;
			cpu_rev = "3.1";
			break;
		case 7:
		default:
			/* Use the latest known revision as default */
			omap_revision = OMAP3430_REV_ES3_1_2;
			cpu_rev = "3.1.2";
		}
		break;
	case 0xb868:
		/*
		 * Handle OMAP/AM 3505/3517 devices
		 *
		 * Set the device to be OMAP3517 here. Actual device
		 * is identified later based on the features.
		 */
		switch (rev) {
		case 0:
			omap_revision = AM35XX_REV_ES1_0;
			cpu_rev = "1.0";
			break;
		case 1:
		default:
			omap_revision = AM35XX_REV_ES1_1;
			cpu_rev = "1.1";
		}
		break;
	case 0xb891:
		/* Handle 36xx devices */
		switch(rev) {
		case 0: /* Take care of early samples */
			omap_revision = OMAP3630_REV_ES1_0;
			cpu_rev = "1.0";
			break;
		case 1:
			omap_revision = OMAP3630_REV_ES1_1;
			cpu_rev = "1.1";
			break;
		case 2:
		default:
			omap_revision = OMAP3630_REV_ES1_2;
			cpu_rev = "1.2";
		}
		break;
	case 0xb81e:
		switch (rev) {
		case 0:
			omap_revision = TI8168_REV_ES1_0;
			cpu_rev = "1.0";
			break;
		case 1:
			omap_revision = TI8168_REV_ES1_1;
			cpu_rev = "1.1";
			break;
		case 2:
			omap_revision = TI8168_REV_ES2_0;
			cpu_rev = "2.0";
			break;
		case 3:
		default:
			omap_revision = TI8168_REV_ES2_1;
			cpu_rev = "2.1";
		}
		break;
	case 0xb944:
		switch (rev) {
		case 0:
			omap_revision = AM335X_REV_ES1_0;
			cpu_rev = "1.0";
			break;
		case 1:
			omap_revision = AM335X_REV_ES2_0;
			cpu_rev = "2.0";
			break;
		case 2:
		default:
			omap_revision = AM335X_REV_ES2_1;
			cpu_rev = "2.1";
			break;
		}
		break;
	case 0xb98c:
		switch (rev) {
		case 0:
			omap_revision = AM437X_REV_ES1_0;
			cpu_rev = "1.0";
			break;
		case 1:
			omap_revision = AM437X_REV_ES1_1;
			cpu_rev = "1.1";
			break;
		case 2:
		default:
			omap_revision = AM437X_REV_ES1_2;
			cpu_rev = "1.2";
			break;
		}
		break;
	case 0xb8f2:
	case 0xb968:
		switch (rev) {
		case 0:
		case 1:
			omap_revision = TI8148_REV_ES1_0;
			cpu_rev = "1.0";
			break;
		case 2:
			omap_revision = TI8148_REV_ES2_0;
			cpu_rev = "2.0";
			break;
		case 3:
		default:
			omap_revision = TI8148_REV_ES2_1;
			cpu_rev = "2.1";
			break;
		}
		break;
	default:
		/* Unknown default to latest silicon rev as default */
		omap_revision = OMAP3630_REV_ES1_2;
		cpu_rev = "1.2";
		pr_warn("Warning: unknown chip type: hawkeye %04x, assuming OMAP3630ES1.2\n",
			hawkeye);
	}
	sprintf(soc_rev, "ES%s", cpu_rev);
}

/*
 * Decode the TAP IDCODE into an omap_revision value for OMAP4-class
 * SoCs and format soc_name/soc_rev.
 */
void __init omap4xxx_check_revision(void)
{
	u32 idcode;
	u16 hawkeye;
	u8 rev;

	/*
	 * The IC rev detection is done with hawkeye and rev.
	 * Note that rev does not map directly to defined processor
	 * revision numbers as ES1.0 uses value 0.
	 */
	idcode = read_tap_reg(OMAP_TAP_IDCODE);
	hawkeye = (idcode >> 12) & 0xffff;
	rev = (idcode >> 28) & 0xf;

	/*
	 * Few initial 4430 ES2.0 samples IDCODE is same as ES1.0
	 * Use ARM register to detect the correct ES version
	 */
	if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) {
		idcode = read_cpuid_id();
		rev = (idcode & 0xf) - 1;
	}

	switch (hawkeye) {
	case 0xb852:
		switch (rev) {
		case 0:
			omap_revision = OMAP4430_REV_ES1_0;
			break;
		case 1:
		default:
			omap_revision = OMAP4430_REV_ES2_0;
		}
		break;
	case 0xb95c:
		switch (rev) {
		case 3:
			omap_revision = OMAP4430_REV_ES2_1;
			break;
		case 4:
			omap_revision = OMAP4430_REV_ES2_2;
			break;
		case 6:
		default:
			omap_revision = OMAP4430_REV_ES2_3;
		}
		break;
	case 0xb94e:
		switch (rev) {
		case 0:
			omap_revision = OMAP4460_REV_ES1_0;
			break;
		case 2:
		default:
			omap_revision = OMAP4460_REV_ES1_1;
			break;
		}
		break;
	case 0xb975:
		switch (rev) {
		case 0:
		default:
			omap_revision = OMAP4470_REV_ES1_0;
			break;
		}
		break;
	default:
		/* Unknown default to latest silicon rev as default */
		omap_revision = OMAP4430_REV_ES2_3;
	}

	sprintf(soc_name, "OMAP%04x", omap_rev() >> 16);
	sprintf(soc_rev, "ES%d.%d", (omap_rev() >> 12) & 0xf,
		(omap_rev() >> 8) & 0xf);
	pr_info("%s %s\n", soc_name, soc_rev);
}

/*
 * Decode the TAP IDCODE for OMAP5-class SoCs.  ES1.0 test chips are
 * deliberately unsupported (BUG()).
 */
void __init omap5xxx_check_revision(void)
{
	u32 idcode;
	u16 hawkeye;
	u8 rev;

	idcode = read_tap_reg(OMAP_TAP_IDCODE);
	hawkeye = (idcode >> 12) & 0xffff;
	rev = (idcode >> 28) & 0xff;
	switch (hawkeye) {
	case 0xb942:
		switch (rev) {
		case 0:
			/* No support for ES1.0 Test chip */
			BUG();
		case 1:
		default:
			omap_revision = OMAP5430_REV_ES2_0;
		}
		break;

	case 0xb998:
		switch (rev) {
		case 0:
			/* No support for ES1.0 Test chip */
			BUG();
		case 1:
		default:
			omap_revision = OMAP5432_REV_ES2_0;
		}
		break;

	default:
		/* Unknown default to latest silicon rev as default*/
		omap_revision = OMAP5430_REV_ES2_0;
	}

	sprintf(soc_name, "OMAP%04x", omap_rev() >> 16);
	sprintf(soc_rev, "ES%d.0", (omap_rev() >> 12) & 0xf);

	pr_info("%s %s\n", soc_name, soc_rev);
}

/*
 * Decode the TAP IDCODE for DRA7-class SoCs.  For DRA76x the exact
 * part additionally depends on the package code in die-ID word 2.
 */
void __init dra7xxx_check_revision(void)
{
	u32 idcode;
	u16 hawkeye;
	u8 rev, package;
	struct omap_die_id odi;

	omap_get_die_id(&odi);
	package = (odi.id_2 >> 16) & 0x3;
	idcode = read_tap_reg(OMAP_TAP_IDCODE);
	hawkeye = (idcode >> 12) & 0xffff;
	rev = (idcode >> 28) & 0xff;
	switch (hawkeye) {
	case 0xbb50:
		switch (rev) {
		case 0:
		default:
			switch (package) {
			case 0x2:
				omap_revision = DRA762_ABZ_REV_ES1_0;
				break;
			case 0x3:
				omap_revision = DRA762_ACD_REV_ES1_0;
				break;
			default:
				omap_revision = DRA762_REV_ES1_0;
				break;
			}
			break;
		}
		break;

	case 0xb990:
		switch (rev) {
		case 0:
			omap_revision = DRA752_REV_ES1_0;
			break;
		case 1:
			omap_revision = DRA752_REV_ES1_1;
			break;
		case 2:
		default:
			omap_revision = DRA752_REV_ES2_0;
			break;
		}
		break;

	case 0xb9bc:
		switch (rev) {
		case 0:
			omap_revision = DRA722_REV_ES1_0;
			break;
		case 1:
			omap_revision = DRA722_REV_ES2_0;
			break;
		case 2:
		default:
			omap_revision = DRA722_REV_ES2_1;
			break;
		}
		break;

	default:
		/* Unknown default to latest silicon rev as default*/
		pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n",
			__func__, idcode, hawkeye, rev);
		omap_revision = DRA752_REV_ES2_0;
	}

	sprintf(soc_name, "DRA%03x", omap_rev() >> 16);
	sprintf(soc_rev, "ES%d.%d", (omap_rev() >> 12) & 0xf,
		(omap_rev() >> 8) & 0xf);

	pr_info("%s %s\n", soc_name, soc_rev);
}
/*
 * Set up things for map_io and processor detection later on. Gets called
 * pretty much first thing from board init. For multi-omap, this gets
 * cpu_is_omapxxxx() working accurately enough for map_io. Then we'll try to
 * detect the exact revision later on in omap2_detect_revision() once map_io
 * is done.
 */
void __init omap2_set_globals_tap(u32 class, void __iomem *tap)
{
	omap_revision = class;
	tap_base = tap;

	/* XXX What is this intended to do? */
	if (soc_is_omap34xx())
		tap_prod_id = 0x0210;
	else
		tap_prod_id = 0x0208;
}

#ifdef CONFIG_SOC_BUS

/* Device-type strings exposed via the soc "type" sysfs attribute */
static const char * const omap_types[] = {
	[OMAP2_DEVICE_TYPE_TEST]	= "TST",
	[OMAP2_DEVICE_TYPE_EMU]		= "EMU",
	[OMAP2_DEVICE_TYPE_SEC]		= "HS",
	[OMAP2_DEVICE_TYPE_GP]		= "GP",
	[OMAP2_DEVICE_TYPE_BAD]		= "BAD",
};

/*
 * Return the SoC family name as a kasprintf()-allocated string.
 * The caller owns the allocation; may return NULL on allocation failure.
 */
static const char * __init omap_get_family(void)
{
	if (soc_is_omap24xx())
		return kasprintf(GFP_KERNEL, "OMAP2");
	else if (soc_is_omap34xx())
		return kasprintf(GFP_KERNEL, "OMAP3");
	else if (soc_is_omap44xx())
		return kasprintf(GFP_KERNEL, "OMAP4");
	else if (soc_is_omap54xx())
		return kasprintf(GFP_KERNEL, "OMAP5");
	else if (soc_is_am33xx() || soc_is_am335x())
		return kasprintf(GFP_KERNEL, "AM33xx");
	else if (soc_is_am43xx())
		return kasprintf(GFP_KERNEL, "AM43xx");
	else if (soc_is_dra7xx())
		return kasprintf(GFP_KERNEL, "DRA7");
	else
		return kasprintf(GFP_KERNEL, "Unknown");
}

/* sysfs show() for /sys/devices/soc0/type */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%s\n", omap_types[omap_type()]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *omap_soc_attrs[] = {
	&dev_attr_type.attr,
	NULL
};

ATTRIBUTE_GROUPS(omap_soc);

/*
 * Register this SoC with the SoC bus so userspace can read
 * machine/family/revision/type from /sys/devices/soc0.
 * Best-effort: failures are silently ignored after freeing what was
 * allocated.
 */
void __init omap_soc_device_init(void)
{
	struct soc_device *soc_dev;
	struct soc_device_attribute *soc_dev_attr;

	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
	if (!soc_dev_attr)
		return;

	soc_dev_attr->machine  = soc_name;
	soc_dev_attr->family   = omap_get_family();
	soc_dev_attr->revision = soc_rev;
	soc_dev_attr->custom_attr_group = omap_soc_groups[0];

	soc_dev = soc_device_register(soc_dev_attr);
	if (IS_ERR(soc_dev)) {
		/*
		 * Fix: the family string is kasprintf()-allocated by
		 * omap_get_family() and was previously leaked on this
		 * error path; free it along with the attribute struct.
		 */
		kfree(soc_dev_attr->family);
		kfree(soc_dev_attr);
		return;
	}
}
#endif /* CONFIG_SOC_BUS */
linux-master
arch/arm/mach-omap2/id.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* * Copyright (C) Sunplus Technology Co., Ltd. * All rights reserved. */ #include <linux/kernel.h> #include <asm/mach/arch.h> static const char *sp7021_compat[] __initconst = { "sunplus,sp7021", NULL }; DT_MACHINE_START(SP7021_DT, "SP7021") .dt_compat = sp7021_compat, MACHINE_END
linux-master
arch/arm/mach-sunplus/sp7021.c
// SPDX-License-Identifier: GPL-2.0-only /* * * arch/arm/probes/decode-arm.c * * Some code moved here from arch/arm/kernel/kprobes-arm.c * * Copyright (C) 2006, 2007 Motorola Inc. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/ptrace.h> #include "decode.h" #include "decode-arm.h" #define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) /* * To avoid the complications of mimicing single-stepping on a * processor without a Next-PC or a single-step mode, and to * avoid having to deal with the side-effects of boosting, we * simulate or emulate (almost) all ARM instructions. * * "Simulation" is where the instruction's behavior is duplicated in * C code. "Emulation" is where the original instruction is rewritten * and executed, often by altering its registers. * * By having all behavior of the kprobe'd instruction completed before * returning from the kprobe_handler(), all locks (scheduler and * interrupt) can safely be released. There is no need for secondary * breakpoints, no race with MP or preemptable kernels, nor having to * clean up resources counts at a later time impacting overall system * performance. By rewriting the instruction, only the minimum registers * need to be loaded and saved back optimizing performance. * * Calling the insnslot_*_rwflags version of a function doesn't hurt * anything even when the CPSR flags aren't updated by the * instruction. It's just a little slower in return for saving * a little space by not having a duplicate function that doesn't * update the flags. (The same optimization can be said for * instructions that do or don't perform register writeback) * Also, instructions can either read the flags, only write the * flags, or read and write the flags. To save combinations * rather than for sheer performance, flag functions just assume * read and write of flags. 
 */

/*
 * Simulate B/BL: branch to (instruction address + 8 + signed offset);
 * for BL (bit 24 set), first store the return address in LR.  On entry
 * regs->ARM_pc is the probed instruction's address + 4, hence iaddr.
 */
void __kprobes simulate_bbl(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	long iaddr = (long) regs->ARM_pc - 4;
	int disp  = branch_displacement(insn);

	if (insn & (1 << 24))
		regs->ARM_lr = iaddr + 4;

	regs->ARM_pc = iaddr + 8 + disp;
}

/*
 * Simulate BLX (immediate): like BL, but also switches to Thumb state
 * (sets the T bit) and adds the H bit ((insn >> 23) & 0x2) to reach
 * halfword-aligned Thumb targets.
 */
void __kprobes simulate_blx1(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	long iaddr = (long) regs->ARM_pc - 4;
	int disp = branch_displacement(insn);

	regs->ARM_lr = iaddr + 4;
	regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
	regs->ARM_cpsr |= PSR_T_BIT;
}

/*
 * Simulate BLX (register) and BX: branch to the address in Rm with
 * bit 0 selecting Thumb state; for BLX (bit 5 set) store the return
 * address in LR first.
 */
void __kprobes simulate_blx2bx(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rm = insn & 0xf;
	long rmv = regs->uregs[rm];

	if (insn & (1 << 5))
		regs->ARM_lr = (long) regs->ARM_pc;

	regs->ARM_pc = rmv & ~0x1;
	regs->ARM_cpsr &= ~PSR_T_BIT;
	if (rmv & 0x1)
		regs->ARM_cpsr |= PSR_T_BIT;
}

/* Simulate MRS Rd, CPSR: copy the CPSR into Rd with execution-state
 * bits masked out. */
void __kprobes simulate_mrs(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 12) & 0xf;
	unsigned long mask = 0xf8ff03df; /* Mask out execution state */
	regs->uregs[rd] = regs->ARM_cpsr & mask;
}

/* Simulate the specific encoding "MOV IP, SP" (r12 = r13). */
void __kprobes simulate_mov_ipsp(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	regs->uregs[12] = regs->uregs[13];
}

/*
 * For the instruction masking and comparisons in all the "space_*"
 * functions below, Do _not_ rearrange the order of tests unless
 * you're very, very sure of what you are doing.  For the sake of
 * efficiency, the masks for some tests sometimes assume other test
 * have been done prior to them so the number of patterns to test
 * for an instruction set can be as broad as possible to reduce the
 * number of tests needed.
 */

/*
 * Decode tables.  Entries are tried in order: each DECODE_* entry pairs
 * a (mask, value) against the instruction word, and REGS() constrains
 * the register fields an emulated instruction may use (NOPC, ANY, ... —
 * see decode.h for the macro semantics).
 */

static const union decode_item arm_1111_table[] = {
	/* Unconditional instructions					*/

	/* memory hint		1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */
	/* PLDI (immediate)	1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */
	/* PLDW (immediate)	1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */
	/* PLD (immediate)	1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */
	DECODE_SIMULATE	(0xfe300000, 0xf4100000, PROBES_PRELOAD_IMM),

	/* memory hint		1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */
	/* PLDI (register)	1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */
	/* PLDW (register)	1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */
	/* PLD (register)	1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */
	DECODE_SIMULATE	(0xfe300010, 0xf6100000, PROBES_PRELOAD_REG),

	/* BLX (immediate)	1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */
	DECODE_SIMULATE	(0xfe000000, 0xfa000000, PROBES_BRANCH_IMM),

	/* CPS			1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
	/* SETEND		1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
	/* SRS			1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
	/* RFE			1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */

	/* Coprocessor instructions... */
	/* MCRR2		1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */
	/* MRRC2		1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */
	/* LDC2			1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
	/* STC2			1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
	/* CDP2			1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
	/* MCR2			1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
	/* MRC2			1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */

	/* Other unallocated instructions...
	 */
	DECODE_END
};

static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = {
	/* Miscellaneous instructions					*/

	/* MRS cpsr		cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
	DECODE_SIMULATEX(0x0ff000f0, 0x01000000, PROBES_MRS,
						 REGS(0, NOPC, 0, 0, 0)),

	/* BX			cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
	DECODE_SIMULATE	(0x0ff000f0, 0x01200010, PROBES_BRANCH_REG),

	/* BLX (register)	cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
	DECODE_SIMULATEX(0x0ff000f0, 0x01200030, PROBES_BRANCH_REG,
						 REGS(0, 0, 0, 0, NOPC)),

	/* CLZ			cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
	DECODE_EMULATEX	(0x0ff000f0, 0x01600010, PROBES_CLZ,
						 REGS(0, NOPC, 0, 0, NOPC)),

	/* QADD			cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */
	/* QSUB			cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */
	/* QDADD		cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */
	/* QDSUB		cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */
	DECODE_EMULATEX	(0x0f9000f0, 0x01000050, PROBES_SATURATING_ARITHMETIC,
						 REGS(NOPC, NOPC, 0, 0, NOPC)),

	/* BXJ			cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
	/* MSR			cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
	/* MRS spsr		cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
	/* BKPT			1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
	/* SMC			cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
	/* And unallocated instructions...
	 */
	DECODE_END
};

static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = {
	/* Halfword multiply and multiply-accumulate			*/

	/* SMLALxy		cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
	DECODE_EMULATEX	(0x0ff00090, 0x01400080, PROBES_MUL1,
						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),

	/* SMULWy		cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
	DECODE_OR	(0x0ff000b0, 0x012000a0),
	/* SMULxy		cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
	DECODE_EMULATEX	(0x0ff00090, 0x01600080, PROBES_MUL2,
						 REGS(NOPC, 0, NOPC, 0, NOPC)),

	/* SMLAxy		cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */
	DECODE_OR	(0x0ff00090, 0x01000080),
	/* SMLAWy		cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */
	DECODE_EMULATEX	(0x0ff000b0, 0x01200080, PROBES_MUL2,
						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),

	DECODE_END
};

static const union decode_item arm_cccc_0000_____1001_table[] = {
	/* Multiply and multiply-accumulate				*/

	/* MUL			cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */
	/* MULS			cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */
	DECODE_EMULATEX	(0x0fe000f0, 0x00000090, PROBES_MUL2,
						 REGS(NOPC, 0, NOPC, 0, NOPC)),

	/* MLA			cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */
	/* MLAS			cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */
	DECODE_OR	(0x0fe000f0, 0x00200090),
	/* MLS			cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */
	DECODE_EMULATEX	(0x0ff000f0, 0x00600090, PROBES_MUL2,
						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),

	/* UMAAL		cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */
	DECODE_OR	(0x0ff000f0, 0x00400090),
	/* UMULL		cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */
	/* UMULLS		cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */
	/* UMLAL		cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */
	/* UMLALS		cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */
	/* SMULL		cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */
	/* SMULLS		cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */
	/* SMLAL		cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */
	/* SMLALS		cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */
	DECODE_EMULATEX	(0x0f8000f0, 0x00800090, PROBES_MUL1,
						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),

	DECODE_END
};

static const union decode_item arm_cccc_0001_____1001_table[] = {
	/* Synchronization primitives					*/

#if __LINUX_ARM_ARCH__ < 6
	/* Deprecated on ARMv6 and may be UNDEFINED on v7		*/
	/* SMP/SWPB		cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
	DECODE_EMULATEX	(0x0fb000f0, 0x01000090, PROBES_SWP,
						 REGS(NOPC, NOPC, 0, 0, NOPC)),
#endif
	/* LDREX/STREX{,D,B,H}	cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
	/* And unallocated instructions...				*/
	DECODE_END
};

static const union decode_item arm_cccc_000x_____1xx1_table[] = {
	/* Extra load/store instructions				*/

	/* STRHT		cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */
	/* ???			cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */
	/* LDRHT		cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */
	/* LDRSBT		cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */
	/* LDRSHT		cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */
	DECODE_REJECT	(0x0f200090, 0x00200090),

	/* LDRD/STRD lr,pc,{...	cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */
	DECODE_REJECT	(0x0e10e0d0, 0x0000e0d0),

	/* LDRD (register)	cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */
	/* STRD (register)	cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */
	DECODE_EMULATEX	(0x0e5000d0, 0x000000d0, PROBES_LDRSTRD,
						 REGS(NOPCWB, NOPCX, 0, 0, NOPC)),

	/* LDRD (immediate)	cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */
	/* STRD (immediate)	cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */
	DECODE_EMULATEX	(0x0e5000d0, 0x004000d0, PROBES_LDRSTRD,
						 REGS(NOPCWB, NOPCX, 0, 0, 0)),

	/* STRH (register)	cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */
	DECODE_EMULATEX	(0x0e5000f0, 0x000000b0, PROBES_STORE_EXTRA,
						 REGS(NOPCWB, NOPC, 0, 0, NOPC)),

	/* LDRH (register)	cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */
	/* LDRSB (register)	cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */
	/* LDRSH (register)	cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */
	DECODE_EMULATEX	(0x0e500090, 0x00100090, PROBES_LOAD_EXTRA,
						 REGS(NOPCWB, NOPC, 0, 0, NOPC)),

	/* STRH (immediate)	cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */
	DECODE_EMULATEX	(0x0e5000f0, 0x004000b0, PROBES_STORE_EXTRA,
						 REGS(NOPCWB, NOPC, 0, 0, 0)),

	/* LDRH (immediate)	cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */
	/* LDRSB (immediate)	cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */
	/* LDRSH (immediate)	cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */
	DECODE_EMULATEX	(0x0e500090, 0x00500090, PROBES_LOAD_EXTRA,
						 REGS(NOPCWB, NOPC, 0, 0, 0)),

	DECODE_END
};

static const union decode_item arm_cccc_000x_table[] = {
	/* Data-processing (register)					*/

	/* <op>S PC, ...	cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */
	DECODE_REJECT	(0x0e10f000, 0x0010f000),

	/* MOV IP, SP		1110 0001 1010 0000 1100 0000 0000 1101 */
	DECODE_SIMULATE	(0xffffffff, 0xe1a0c00d, PROBES_MOV_IP_SP),

	/* TST (register)	cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */
	/* TEQ (register)	cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */
	/* CMP (register)	cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */
	/* CMN (register)	cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */
	DECODE_EMULATEX	(0x0f900010, 0x01100000, PROBES_DATA_PROCESSING_REG,
						 REGS(ANY, 0, 0, 0, ANY)),

	/* MOV (register)	cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */
	/* MVN (register)	cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */
	DECODE_EMULATEX	(0x0fa00010, 0x01a00000, PROBES_DATA_PROCESSING_REG,
						 REGS(0, ANY, 0, 0, ANY)),

	/* AND (register)	cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */
	/* EOR (register)	cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */
	/* SUB (register)	cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */
	/* RSB (register)	cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */
	/* ADD (register)	cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */
	/* ADC (register)	cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */
	/* SBC (register)	cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */
	/* RSC (register)	cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */
	/* ORR (register)	cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */
	/* BIC (register)	cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */
	DECODE_EMULATEX	(0x0e000010, 0x00000000, PROBES_DATA_PROCESSING_REG,
						 REGS(ANY, ANY, 0, 0, ANY)),

	/* TST (reg-shift reg)	cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */
	/* TEQ (reg-shift reg)	cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */
	/* CMP (reg-shift reg)	cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
	/* CMN (reg-shift reg)	cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
	DECODE_EMULATEX	(0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG,
						 REGS(NOPC, 0, NOPC, 0, NOPC)),

	/* MOV (reg-shift reg)	cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
	/* MVN (reg-shift reg)	cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
	DECODE_EMULATEX	(0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG,
						 REGS(0, NOPC, NOPC, 0, NOPC)),

	/* AND (reg-shift reg)	cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
	/* EOR (reg-shift reg)	cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
	/* SUB (reg-shift reg)	cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */
	/* RSB (reg-shift reg)	cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */
	/* ADD (reg-shift reg)	cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */
	/* ADC (reg-shift reg)	cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */
	/* SBC (reg-shift reg)	cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */
	/* RSC (reg-shift reg)	cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */
	/* ORR (reg-shift reg)	cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
	/* BIC (reg-shift reg)	cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
	DECODE_EMULATEX	(0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG,
						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),

	DECODE_END
};

static const union decode_item arm_cccc_001x_table[] = {
	/* Data-processing (immediate)					*/

	/* MOVW			cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
	/* MOVT			cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0fb00000, 0x03000000, PROBES_MOV_HALFWORD,
						 REGS(0, NOPC, 0, 0, 0)),

	/* YIELD		cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
	DECODE_OR	(0x0fff00ff, 0x03200001),
	/* SEV			cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
	DECODE_EMULATE	(0x0fff00ff, 0x03200004, PROBES_SEV),
	/* NOP			cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
	/* WFE			cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
	/* WFI			cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
	DECODE_SIMULATE	(0x0fff00fc, 0x03200000, PROBES_WFE),
	/* DBG			cccc 0011 0010 0000 xxxx xxxx ffff xxxx */
	/* unallocated hints	cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
	/* MSR (immediate)	cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0x0fb00000, 0x03200000),

	/* <op>S PC, ...	cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */
	DECODE_REJECT	(0x0e10f000, 0x0210f000),

	/* TST (immediate)	cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */
	/* TEQ (immediate)	cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */
	/* CMP (immediate)	cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */
	/* CMN (immediate)	cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0f900000, 0x03100000, PROBES_DATA_PROCESSING_IMM,
						 REGS(ANY, 0, 0, 0, 0)),

	/* MOV (immediate)	cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */
	/* MVN (immediate)	cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0fa00000, 0x03a00000, PROBES_DATA_PROCESSING_IMM,
						 REGS(0, ANY, 0, 0, 0)),

	/* AND (immediate)	cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */
	/* EOR (immediate)	cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */
	/* SUB (immediate)	cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */
	/* RSB (immediate)	cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */
	/* ADD (immediate)	cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */
	/* ADC (immediate)	cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */
	/* SBC (immediate)	cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */
	/* RSC (immediate)	cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */
	/* ORR (immediate)	cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */
	/* BIC (immediate)	cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0e000000, 0x02000000, PROBES_DATA_PROCESSING_IMM,
						 REGS(ANY, ANY, 0, 0, 0)),

	DECODE_END
};

static const union decode_item arm_cccc_0110_____xxx1_table[] = {
	/* Media instructions						*/

	/* SEL			cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */
	DECODE_EMULATEX	(0x0ff000f0, 0x068000b0, PROBES_SATURATE,
						 REGS(NOPC, NOPC, 0, 0, NOPC)),

	/* SSAT			cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */
	/* USAT			cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */
	DECODE_OR(0x0fa00030, 0x06a00010),
	/* SSAT16		cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */
	/* USAT16		cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */
	DECODE_EMULATEX	(0x0fb000f0, 0x06a00030, PROBES_SATURATE,
						 REGS(0, NOPC, 0, 0, NOPC)),

	/* REV			cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
	/* REV16		cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
	/* RBIT			cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
	/* REVSH		cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
	DECODE_EMULATEX	(0x0fb00070, 0x06b00030, PROBES_REV,
						 REGS(0, NOPC, 0, 0, NOPC)),

	/* ???			cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */
	DECODE_REJECT	(0x0fb00010, 0x06000010),
	/* ???			cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */
	DECODE_REJECT	(0x0f8000f0, 0x060000b0),
	/* ???			cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */
	DECODE_REJECT	(0x0f8000f0, 0x060000d0),

	/* SADD16		cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */
	/* SADDSUBX		cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */
	/* SSUBADDX		cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */
	/* SSUB16		cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */
	/* SADD8		cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */
	/* SSUB8		cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */
	/* QADD16		cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */
	/* QADDSUBX		cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */
	/* QSUBADDX		cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */
	/* QSUB16		cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */
	/* QADD8		cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */
	/* QSUB8		cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */
	/* SHADD16		cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */
	/* SHADDSUBX		cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */
	/* SHSUBADDX		cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */
	/* SHSUB16		cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */
	/* SHADD8		cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */
	/* SHSUB8		cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */
	/* UADD16		cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */
	/* UADDSUBX		cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */
	/* USUBADDX		cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */
	/* USUB16		cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */
	/* UADD8		cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */
	/* USUB8		cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */
	/* UQADD16		cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */
	/* UQADDSUBX		cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */
	/* UQSUBADDX		cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */
	/*
UQSUB16 cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */ /* UQADD8 cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */ /* UQSUB8 cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */ /* UHADD16 cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */ /* UHADDSUBX cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */ /* UHSUBADDX cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */ /* UHSUB16 cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */ /* UHADD8 cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */ /* UHSUB8 cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */ DECODE_EMULATEX (0x0f800010, 0x06000010, PROBES_MMI, REGS(NOPC, NOPC, 0, 0, NOPC)), /* PKHBT cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */ /* PKHTB cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */ DECODE_EMULATEX (0x0ff00030, 0x06800010, PROBES_PACK, REGS(NOPC, NOPC, 0, 0, NOPC)), /* ??? cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */ /* ??? cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */ DECODE_REJECT (0x0fb000f0, 0x06900070), /* SXTB16 cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */ /* SXTB cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */ /* SXTH cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */ /* UXTB16 cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */ /* UXTB cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */ /* UXTH cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */ DECODE_EMULATEX (0x0f8f00f0, 0x068f0070, PROBES_EXTEND, REGS(0, NOPC, 0, 0, NOPC)), /* SXTAB16 cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */ /* SXTAB cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */ /* SXTAH cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */ /* UXTAB16 cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */ /* UXTAB cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */ /* UXTAH cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */ DECODE_EMULATEX (0x0f8000f0, 0x06800070, PROBES_EXTEND_ADD, REGS(NOPCX, NOPC, 0, 0, NOPC)), DECODE_END }; static const union decode_item arm_cccc_0111_____xxx1_table[] = { /* Media instructions */ /* UNDEFINED cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */ DECODE_REJECT (0x0ff000f0, 0x07f000f0), /* SMLALD cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ /* SMLSLD cccc 0111 0100 xxxx xxxx xxxx 01x1 
xxxx */ DECODE_EMULATEX (0x0ff00090, 0x07400010, PROBES_MUL_ADD_LONG, REGS(NOPC, NOPC, NOPC, 0, NOPC)), /* SMUAD cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */ /* SMUSD cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */ DECODE_OR (0x0ff0f090, 0x0700f010), /* SMMUL cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */ DECODE_OR (0x0ff0f0d0, 0x0750f010), /* USAD8 cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ DECODE_EMULATEX (0x0ff0f0f0, 0x0780f010, PROBES_MUL_ADD, REGS(NOPC, 0, NOPC, 0, NOPC)), /* SMLAD cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */ /* SMLSD cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */ DECODE_OR (0x0ff00090, 0x07000010), /* SMMLA cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */ DECODE_OR (0x0ff000d0, 0x07500010), /* USADA8 cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ DECODE_EMULATEX (0x0ff000f0, 0x07800010, PROBES_MUL_ADD, REGS(NOPC, NOPCX, NOPC, 0, NOPC)), /* SMMLS cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */ DECODE_EMULATEX (0x0ff000d0, 0x075000d0, PROBES_MUL_ADD, REGS(NOPC, NOPC, NOPC, 0, NOPC)), /* SBFX cccc 0111 101x xxxx xxxx xxxx x101 xxxx */ /* UBFX cccc 0111 111x xxxx xxxx xxxx x101 xxxx */ DECODE_EMULATEX (0x0fa00070, 0x07a00050, PROBES_BITFIELD, REGS(0, NOPC, 0, 0, NOPC)), /* BFC cccc 0111 110x xxxx xxxx xxxx x001 1111 */ DECODE_EMULATEX (0x0fe0007f, 0x07c0001f, PROBES_BITFIELD, REGS(0, NOPC, 0, 0, 0)), /* BFI cccc 0111 110x xxxx xxxx xxxx x001 xxxx */ DECODE_EMULATEX (0x0fe00070, 0x07c00010, PROBES_BITFIELD, REGS(0, NOPC, 0, 0, NOPCX)), DECODE_END }; static const union decode_item arm_cccc_01xx_table[] = { /* Load/store word and unsigned byte */ /* LDRB/STRB pc,[...] 
cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0x0c40f000, 0x0440f000), /* STRT cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ /* LDRT cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */ /* STRBT cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ /* LDRBT cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0x0d200000, 0x04200000), /* STR (immediate) cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */ /* STRB (immediate) cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x04000000, PROBES_STORE, REGS(NOPCWB, ANY, 0, 0, 0)), /* LDR (immediate) cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* LDRB (immediate) cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x04100000, PROBES_LOAD, REGS(NOPCWB, ANY, 0, 0, 0)), /* STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */ /* STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x06000000, PROBES_STORE, REGS(NOPCWB, ANY, 0, 0, NOPC)), /* LDR (register) cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* LDRB (register) cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0x0e100000, 0x06100000, PROBES_LOAD, REGS(NOPCWB, ANY, 0, 0, NOPC)), DECODE_END }; static const union decode_item arm_cccc_100x_table[] = { /* Block data transfer instructions */ /* LDM cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* STM cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ DECODE_CUSTOM (0x0e400000, 0x08000000, PROBES_LDMSTM), /* STM (user registers) cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ /* LDM (user registers) cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */ /* LDM (exception ret) cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */ DECODE_END }; const union decode_item probes_decode_arm_table[] = { /* * Unconditional instructions * 1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xf0000000, 0xf0000000, arm_1111_table), /* * Miscellaneous instructions * cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx */ DECODE_TABLE (0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table), /* * Halfword 
multiply and multiply-accumulate * cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx */ DECODE_TABLE (0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table), /* * Multiply and multiply-accumulate * cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx */ DECODE_TABLE (0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table), /* * Synchronization primitives * cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx */ DECODE_TABLE (0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table), /* * Extra load/store instructions * cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx */ DECODE_TABLE (0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table), /* * Data-processing (register) * cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx * Data-processing (register-shifted register) * cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx */ DECODE_TABLE (0x0e000000, 0x00000000, arm_cccc_000x_table), /* * Data-processing (immediate) * cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0x0e000000, 0x02000000, arm_cccc_001x_table), /* * Media instructions * cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx */ DECODE_TABLE (0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table), DECODE_TABLE (0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table), /* * Load/store word and unsigned byte * cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0x0c000000, 0x04000000, arm_cccc_01xx_table), /* * Block data transfer instructions * cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0x0e000000, 0x08000000, arm_cccc_100x_table), /* B cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ /* BL cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_SIMULATE (0x0e000000, 0x0a000000, PROBES_BRANCH), /* * Supervisor Call, and coprocessor instructions */ /* MCRR cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */ /* MRRC cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */ /* LDC cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ /* STC cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ /* CDP cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ /* MCR cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ /* MRC cccc 1110 xxx1 xxxx xxxx 
xxxx xxx1 xxxx */ /* SVC cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0x0c000000, 0x0c000000), DECODE_END }; #ifdef CONFIG_ARM_KPROBES_TEST_MODULE EXPORT_SYMBOL_GPL(probes_decode_arm_table); #endif static void __kprobes arm_singlestep(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { regs->ARM_pc += 4; asi->insn_handler(insn, asi, regs); } /* Return: * INSN_REJECTED If instruction is one not allowed to kprobe, * INSN_GOOD If instruction is supported and uses instruction slot, * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. * * For instructions we don't want to kprobe (INSN_REJECTED return result): * These are generally ones that modify the processor state making * them "hard" to simulate such as switches processor modes or * make accesses in alternate modes. Any of these could be simulated * if the work was put into it, but low return considering they * should also be very rare. */ enum probes_insn __kprobes arm_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi, bool emulate, const union decode_action *actions, const struct decode_checker *checkers[]) { asi->insn_singlestep = arm_singlestep; asi->insn_check_cc = probes_condition_checks[insn>>28]; return probes_decode_insn(insn, asi, probes_decode_arm_table, false, emulate, actions, checkers); }
/* linux-master: arch/arm/probes/decode-arm.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/decode.c
 *
 * Copyright (C) 2011 Jon Medhurst <[email protected]>.
 *
 * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
 * Copyright (C) 2006, 2007 Motorola Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/system_info.h>
#include <asm/ptrace.h>
#include <linux/bug.h>

#include "decode.h"

#ifndef find_str_pc_offset

/*
 * For STR and STM instructions, an ARM core may choose to use either
 * a +8 or a +12 displacement from the current instruction's address.
 * Whichever value is chosen for a given core, it must be the same for
 * both instructions and may not change. This function measures it.
 */
int str_pc_offset;

void __init find_str_pc_offset(void)
{
	int addr, scratch, ret;

	/* Store PC to memory and compare it with the known address of the
	 * first instruction; the difference is this core's STR PC offset. */
	__asm__ (
		"sub	%[ret], pc, #4		\n\t"
		"str	pc, %[addr]		\n\t"
		"ldr	%[scr], %[addr]		\n\t"
		"sub	%[ret], %[scr], %[ret]	\n\t"
		: [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));

	str_pc_offset = ret;
}

#endif /* !find_str_pc_offset */

#ifndef test_load_write_pc_interworking

/* True when a load that writes PC interworks (switches ARM/Thumb state) */
bool load_write_pc_interworks;

void __init test_load_write_pc_interworking(void)
{
	int arch = cpu_architecture();
	BUG_ON(arch == CPU_ARCH_UNKNOWN);
	load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
}

#endif /* !test_load_write_pc_interworking */

#ifndef test_alu_write_pc_interworking

/* True when an ALU result written to PC interworks */
bool alu_write_pc_interworks;

void __init test_alu_write_pc_interworking(void)
{
	int arch = cpu_architecture();
	BUG_ON(arch == CPU_ARCH_UNKNOWN);
	alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
}

#endif /* !test_alu_write_pc_interworking */

/* Run all the boot-time probes of CPU-specific behaviour above */
void __init arm_probes_decode_init(void)
{
	find_str_pc_offset();
	test_load_write_pc_interworking();
	test_alu_write_pc_interworking();
}

/*
 * Condition-code predicates: each returns non-zero when the condition
 * named by its suffix holds for the supplied CPSR value.  The shift
 * tricks below exploit the flag layout (N, Z, C, V in adjacent high
 * bits) to combine flags without branching.
 */
static unsigned long __kprobes __check_eq(unsigned long cpsr)
{
	return cpsr & PSR_Z_BIT;
}

static unsigned long __kprobes __check_ne(unsigned long cpsr)
{
	return (~cpsr) & PSR_Z_BIT;
}

static unsigned long __kprobes __check_cs(unsigned long cpsr)
{
	return cpsr & PSR_C_BIT;
}

static unsigned long __kprobes __check_cc(unsigned long cpsr)
{
	return (~cpsr) & PSR_C_BIT;
}

static unsigned long __kprobes __check_mi(unsigned long cpsr)
{
	return cpsr & PSR_N_BIT;
}

static unsigned long __kprobes __check_pl(unsigned long cpsr)
{
	return (~cpsr) & PSR_N_BIT;
}

static unsigned long __kprobes __check_vs(unsigned long cpsr)
{
	return cpsr & PSR_V_BIT;
}

static unsigned long __kprobes __check_vc(unsigned long cpsr)
{
	return (~cpsr) & PSR_V_BIT;
}

static unsigned long __kprobes __check_hi(unsigned long cpsr)
{
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return cpsr & PSR_C_BIT;
}

static unsigned long __kprobes __check_ls(unsigned long cpsr)
{
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return (~cpsr) & PSR_C_BIT;
}

static unsigned long __kprobes __check_ge(unsigned long cpsr)
{
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return (~cpsr) & PSR_N_BIT;
}

static unsigned long __kprobes __check_lt(unsigned long cpsr)
{
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return cpsr & PSR_N_BIT;
}

static unsigned long __kprobes __check_gt(unsigned long cpsr)
{
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (~temp) & PSR_N_BIT;
}

static unsigned long __kprobes __check_le(unsigned long cpsr)
{
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return temp & PSR_N_BIT;
}

static unsigned long __kprobes __check_al(unsigned long cpsr)
{
	/* "Always" condition: cpsr is deliberately ignored */
	return true;
}

/*
 * Indexed by the 4-bit condition field; slot 15 (the unconditional/NV
 * encoding) is mapped to "always" just like slot 14 (AL).
 */
probes_check_cc * const probes_condition_checks[16] = {
	&__check_eq, &__check_ne, &__check_cs, &__check_cc,
	&__check_mi, &__check_pl, &__check_vs, &__check_vc,
	&__check_hi, &__check_ls, &__check_ge, &__check_lt,
	&__check_gt, &__check_le, &__check_al, &__check_al
};

/* Intentionally empty: simulating a NOP/hint needs no state change */
void __kprobes probes_simulate_nop(probes_opcode_t opcode,
	struct arch_probes_insn *asi,
	struct pt_regs *regs)
{
}

/* Run the instruction slot as-is; used when no register fix-up is needed */
void __kprobes probes_emulate_none(probes_opcode_t opcode,
	struct arch_probes_insn *asi,
	struct pt_regs *regs)
{
	asi->insn_fn();
}

/*
 * Prepare an instruction slot to receive an instruction for emulating.
 * This is done by placing a subroutine return after the location where the
 * instruction will be placed. We also modify ARM instructions to be
 * unconditional as the condition code will already be checked before any
 * emulation handler is called.
 */
static probes_opcode_t __kprobes
prepare_emulated_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
		      bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
	if (thumb) {
		u16 *thumb_insn = (u16 *)asi->insn;
		/*
		 * Thumb bx lr.  Both halfword slots after the instruction
		 * are filled so the return is in place whether
		 * set_emulated_insn() later writes one halfword (16-bit
		 * insn) or two (32-bit insn).
		 */
		thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
		thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
		return insn;
	}
	asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
#else
	asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
#endif
	/* Make an ARM instruction unconditional (condition field -> AL) */
	if (insn < 0xe0000000)
		insn = (insn | 0xe0000000) & ~0x10000000;
	return insn;
}

/*
 * Write a (probably modified) instruction into the slot previously prepared by
 * prepare_emulated_insn
 */
static void __kprobes
set_emulated_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
		  bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
	if (thumb) {
		u16 *ip = (u16 *)asi->insn;
		if (is_wide_instruction(insn))
			*ip++ = __opcode_to_mem_thumb16(insn >> 16);
		*ip++ = __opcode_to_mem_thumb16(insn);
		return;
	}
#endif
	asi->insn[0] = __opcode_to_mem_arm(insn);
}

/*
 * When we modify the register numbers encoded in an instruction to be emulated,
 * the new values come from this define. For ARM and 32-bit Thumb instructions
 * this gives...
 *
 *	bit position	  16  12   8   4   0
 *	---------------+---+---+---+---+---+
 *	register	 r2  r0  r1  --  r3
 */
#define INSN_NEW_BITS		0x00020103

/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */
#define INSN_SAMEAS16_BITS	0x22222222

/*
 * Validate and modify each of the registers encoded in an instruction.
 *
 * Each nibble in regs contains a value from enum decode_reg_type. For each
 * non-zero value, the corresponding nibble in pinsn is validated and modified
 * according to the type.
 */
static bool __kprobes decode_regs(probes_opcode_t *pinsn, u32 regs, bool modify)
{
	probes_opcode_t insn = *pinsn;
	probes_opcode_t mask = 0xf; /* Start at least significant nibble */

	for (; regs != 0; regs >>= 4, mask <<= 4) {

		probes_opcode_t new_bits = INSN_NEW_BITS;

		switch (regs & 0xf) {

		case REG_TYPE_NONE:
			/* Nibble not a register, skip to next */
			continue;

		case REG_TYPE_ANY:
			/* Any register is allowed */
			break;

		case REG_TYPE_SAMEAS16:
			/* Replace register with same as at bit position 16 */
			new_bits = INSN_SAMEAS16_BITS;
			break;

		case REG_TYPE_SP:
			/* Only allow SP (R13) */
			if ((insn ^ 0xdddddddd) & mask)
				goto reject;
			break;

		case REG_TYPE_PC:
			/* Only allow PC (R15) */
			if ((insn ^ 0xffffffff) & mask)
				goto reject;
			break;

		case REG_TYPE_NOSP:
			/* Reject SP (R13) */
			if (((insn ^ 0xdddddddd) & mask) == 0)
				goto reject;
			break;

		case REG_TYPE_NOSPPC:
		case REG_TYPE_NOSPPCX:
			/* Reject SP and PC (R13 and R15) */
			if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0)
				goto reject;
			break;

		case REG_TYPE_NOPCWB:
			if (!is_writeback(insn))
				break; /* No writeback, so any register is OK */
			fallthrough;
		case REG_TYPE_NOPC:
		case REG_TYPE_NOPCX:
			/* Reject PC (R15) */
			if (((insn ^ 0xffffffff) & mask) == 0)
				goto reject;
			break;
		}

		/* Replace value of nibble with new register number... */
		insn &= ~mask;
		insn |= new_bits & mask;
	}

	if (modify)
		*pinsn = insn;

	return true;

reject:
	return false;
}

/* Byte size of each decode_* struct, indexed by its decode_type */
static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
	[DECODE_TYPE_TABLE]	= sizeof(struct decode_table),
	[DECODE_TYPE_CUSTOM]	= sizeof(struct decode_custom),
	[DECODE_TYPE_SIMULATE]	= sizeof(struct decode_simulate),
	[DECODE_TYPE_EMULATE]	= sizeof(struct decode_emulate),
	[DECODE_TYPE_OR]	= sizeof(struct decode_or),
	[DECODE_TYPE_REJECT]	= sizeof(struct decode_reject)
};

/*
 * Run every checker registered for @action against the (unmodified)
 * instruction.  Returns INSN_REJECTED as soon as any checker rejects,
 * INSN_GOOD otherwise (including when @checkers is NULL).
 */
static int run_checkers(const struct decode_checker *checkers[],
		int action, probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	const struct decode_checker **p;

	if (!checkers)
		return INSN_GOOD;

	p = checkers;
	while (*p != NULL) {
		int retval;
		probes_check_t *checker_func = (*p)[action].checker;

		retval = INSN_GOOD;
		if (checker_func)
			retval = checker_func(insn, asi, h);
		if (retval == INSN_REJECTED)
			return retval;
		p++;
	}
	return INSN_GOOD;
}

/*
 * probes_decode_insn operates on data tables in order to decode an ARM
 * architecture instruction onto which a kprobe has been placed.
 *
 * These instruction decoding tables are a concatenation of entries each
 * of which consist of one of the following structs:
 *
 *	decode_table
 *	decode_custom
 *	decode_simulate
 *	decode_emulate
 *	decode_or
 *	decode_reject
 *
 * Each of these starts with a struct decode_header which has the following
 * fields:
 *
 *	type_regs
 *	mask
 *	value
 *
 * The least significant DECODE_TYPE_BITS of type_regs contains a value
 * from enum decode_type, this indicates which of the decode_* structs
 * the entry contains. The value DECODE_TYPE_END indicates the end of the
 * table.
 *
 * When the table is parsed, each entry is checked in turn to see if it
 * matches the instruction to be decoded using the test:
 *
 *	(insn & mask) == value
 *
 * If no match is found before the end of the table is reached then decoding
 * fails with INSN_REJECTED.
 *
 * When a match is found, decode_regs() is called to validate and modify each
 * of the registers encoded in the instruction; the data it uses to do this
 * is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding
 * to fail with INSN_REJECTED.
 *
 * Once the instruction has passed the above tests, further processing
 * depends on the type of the table entry's decode struct.
 *
 */
int __kprobes
probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
		   const union decode_item *table, bool thumb,
		   bool emulate, const union decode_action *actions,
		   const struct decode_checker *checkers[])
{
	const struct decode_header *h = (struct decode_header *)table;
	const struct decode_header *next;
	bool matched = false;
	/*
	 * @insn can be modified by decode_regs. Save its original
	 * value for checkers.
	 */
	probes_opcode_t origin_insn = insn;

	/*
	 * stack_space is initialized to 0 here.  Checker functions should
	 * update its value if they find this is a stack store instruction:
	 * a positive value means bytes of stack usage, a negative value
	 * means the stack usage cannot be determined statically.  For
	 * instructions that don't store to the stack, checkers leave it
	 * untouched.
	 */
	asi->stack_space = 0;

	/*
	 * Similarly to stack_space, register_usage_flags is filled by
	 * checkers.  Its default value is set to ~0, which is 'all
	 * registers are used', to prevent any potential optimization.
	 */
	asi->register_usage_flags = ~0UL;

	if (emulate)
		insn = prepare_emulated_insn(insn, asi, thumb);

	for (;; h = next) {
		enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
		u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;

		if (type == DECODE_TYPE_END)
			return INSN_REJECTED;

		/* Default: step to the next entry in this (variable-stride)
		 * table; DECODE_TYPE_TABLE entries override this below. */
		next = (struct decode_header *)
				((uintptr_t)h + decode_struct_sizes[type]);

		/* A preceding DECODE_TYPE_OR forces this entry to match */
		if (!matched && (insn & h->mask.bits) != h->value.bits)
			continue;

		if (!decode_regs(&insn, regs, emulate))
			return INSN_REJECTED;

		switch (type) {

		case DECODE_TYPE_TABLE: {
			struct decode_table *d = (struct decode_table *)h;
			next = (struct decode_header *)d->table.table;
			break;
		}

		case DECODE_TYPE_CUSTOM: {
			int err;
			struct decode_custom *d = (struct decode_custom *)h;
			int action = d->decoder.action;

			err = run_checkers(checkers, action, origin_insn, asi, h);
			if (err == INSN_REJECTED)
				return INSN_REJECTED;
			return actions[action].decoder(insn, asi, h);
		}

		case DECODE_TYPE_SIMULATE: {
			int err;
			struct decode_simulate *d = (struct decode_simulate *)h;
			int action = d->handler.action;

			err = run_checkers(checkers, action, origin_insn, asi, h);
			if (err == INSN_REJECTED)
				return INSN_REJECTED;
			asi->insn_handler = actions[action].handler;
			return INSN_GOOD_NO_SLOT;
		}

		case DECODE_TYPE_EMULATE: {
			int err;
			struct decode_emulate *d = (struct decode_emulate *)h;
			int action = d->handler.action;

			err = run_checkers(checkers, action, origin_insn, asi, h);
			if (err == INSN_REJECTED)
				return INSN_REJECTED;

			if (!emulate)
				return actions[action].decoder(insn, asi, h);

			asi->insn_handler = actions[action].handler;
			set_emulated_insn(insn, asi, thumb);
			return INSN_GOOD;
		}

		case DECODE_TYPE_OR:
			matched = true;
			break;

		case DECODE_TYPE_REJECT:
		default:
			return INSN_REJECTED;
		}
	}
}
/* linux-master: arch/arm/probes/decode.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/decode-thumb.c
 *
 * Copyright (C) 2011 Jon Medhurst <[email protected]>.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include "decode.h"
#include "decode-thumb.h"

/*
 * The tables below drive the generic decoder in decode.c. Entries are
 * order-sensitive: DECODE_REJECT entries filter out encodings before the
 * broader patterns that follow them, and DECODE_OR chains an encoding onto
 * the action of the next entry. Do not reorder entries.
 */

static const union decode_item t32_table_1110_100x_x0xx[] = {
	/* Load/store multiple instructions */

	/* Rn is PC		1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe4f0000, 0xe80f0000),

	/* SRS			1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */
	/* RFE			1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffc00000, 0xe8000000),
	/* SRS			1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */
	/* RFE			1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffc00000, 0xe9800000),

	/* STM Rn, {...pc}	1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe508000, 0xe8008000),
	/* LDM Rn, {...lr,pc}	1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe50c000, 0xe810c000),
	/* LDM/STM Rn, {...sp}	1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe402000, 0xe8002000),

	/* STMIA		1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */
	/* LDMIA		1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */
	/* STMDB		1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */
	/* LDMDB		1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_CUSTOM	(0xfe400000, 0xe8000000, PROBES_T32_LDMSTM),

	DECODE_END
};

static const union decode_item t32_table_1110_100x_x1xx[] = {
	/* Load/store dual, load/store exclusive, table branch */

	/* STRD (immediate)	1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */
	/* LDRD (immediate)	1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */
	DECODE_OR	(0xff600000, 0xe8600000),
	/* STRD (immediate)	1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */
	/* LDRD (immediate)	1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xff400000, 0xe9400000, PROBES_T32_LDRDSTRD,
						 REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)),

	/* TBB			1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */
	/* TBH			1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */
	DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, PROBES_T32_TABLE_BRANCH,
						 REGS(NOSP, 0, 0, 0, NOSPPC)),

	/* STREX		1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */
	/* LDREX		1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */
	/* STREXB		1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */
	/* STREXH		1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */
	/* STREXD		1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */
	/* LDREXB		1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */
	/* LDREXH		1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */
	/* LDREXD		1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */
	/* And unallocated instructions...				*/
	DECODE_END
};

static const union decode_item t32_table_1110_101x[] = {
	/* Data-processing (shifted register) */

	/* TST			1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */
	/* TEQ			1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xff700f00, 0xea100f00, PROBES_T32_TST,
						 REGS(NOSPPC, 0, 0, 0, NOSPPC)),

	/* CMN			1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */
	DECODE_OR	(0xfff00f00, 0xeb100f00),
	/* CMP			1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xfff00f00, 0xebb00f00, PROBES_T32_TST,
						 REGS(NOPC, 0, 0, 0, NOSPPC)),

	/* MOV			1110 1010 010x 1111 xxxx xxxx xxxx xxxx */
	/* MVN			1110 1010 011x 1111 xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xffcf0000, 0xea4f0000, PROBES_T32_MOV,
						 REGS(0, 0, NOSPPC, 0, NOSPPC)),

	/* ???			1110 1010 101x xxxx xxxx xxxx xxxx xxxx */
	/* ???			1110 1010 111x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffa00000, 0xeaa00000),
	/* ???			1110 1011 001x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffe00000, 0xeb200000),
	/* ???			1110 1011 100x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffe00000, 0xeb800000),
	/* ???			1110 1011 111x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffe00000, 0xebe00000),

	/* ADD/SUB SP, SP, Rm, LSL #0..3			*/
	/*			1110 1011 x0xx 1101 x000 1101 xx00 xxxx */
	DECODE_EMULATEX	(0xff4f7f30, 0xeb0d0d00, PROBES_T32_ADDSUB,
						 REGS(SP, 0, SP, 0, NOSPPC)),

	/* ADD/SUB SP, SP, Rm, shift				*/
	/*			1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */
	DECODE_REJECT	(0xff4f0f00, 0xeb0d0d00),

	/* ADD/SUB Rd, SP, Rm, shift				*/
	/*			1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xff4f0000, 0xeb0d0000, PROBES_T32_ADDSUB,
						 REGS(SP, 0, NOPC, 0, NOSPPC)),

	/* AND			1110 1010 000x xxxx xxxx xxxx xxxx xxxx */
	/* BIC			1110 1010 001x xxxx xxxx xxxx xxxx xxxx */
	/* ORR			1110 1010 010x xxxx xxxx xxxx xxxx xxxx */
	/* ORN			1110 1010 011x xxxx xxxx xxxx xxxx xxxx */
	/* EOR			1110 1010 100x xxxx xxxx xxxx xxxx xxxx */
	/* PKH			1110 1010 110x xxxx xxxx xxxx xxxx xxxx */
	/* ADD			1110 1011 000x xxxx xxxx xxxx xxxx xxxx */
	/* ADC			1110 1011 010x xxxx xxxx xxxx xxxx xxxx */
	/* SBC			1110 1011 011x xxxx xxxx xxxx xxxx xxxx */
	/* SUB			1110 1011 101x xxxx xxxx xxxx xxxx xxxx */
	/* RSB			1110 1011 110x xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfe000000, 0xea000000, PROBES_T32_LOGICAL,
						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),

	DECODE_END
};

static const union decode_item t32_table_1111_0x0x___0[] = {
	/* Data-processing (modified immediate) */

	/* TST			1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */
	/* TEQ			1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xfb708f00, 0xf0100f00, PROBES_T32_TST,
						 REGS(NOSPPC, 0, 0, 0, 0)),

	/* CMN			1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */
	DECODE_OR	(0xfbf08f00, 0xf1100f00),
	/* CMP			1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xfbf08f00, 0xf1b00f00, PROBES_T32_CMP,
						 REGS(NOPC, 0, 0, 0, 0)),

	/* MOV			1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */
	/* MVN			1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbcf8000, 0xf04f0000, PROBES_T32_MOV,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/* ???			1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf0a00000),
	/* ???			1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */
	/* ???			1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbc08000, 0xf0c00000),
	/* ???			1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf1200000),
	/* ???			1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf1800000),
	/* ???			1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf1e00000),

	/* ADD Rd, SP, #imm	1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */
	/* SUB Rd, SP, #imm	1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb4f8000, 0xf10d0000, PROBES_T32_ADDSUB,
						 REGS(SP, 0, NOPC, 0, 0)),

	/* AND			1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */
	/* BIC			1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */
	/* ORR			1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */
	/* ORN			1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */
	/* EOR			1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */
	/* ADD			1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */
	/* ADC			1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */
	/* SBC			1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */
	/* SUB			1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */
	/* RSB			1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfa008000, 0xf0000000, PROBES_T32_LOGICAL,
						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),

	DECODE_END
};

static const union decode_item t32_table_1111_0x1x___0[] = {
	/* Data-processing (plain binary immediate) */

	/* ADDW Rd, PC, #imm	1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */
	DECODE_OR	(0xfbff8000, 0xf20f0000),
	/* SUBW Rd, PC, #imm	1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbff8000, 0xf2af0000, PROBES_T32_ADDWSUBW_PC,
						 REGS(PC, 0, NOSPPC, 0, 0)),

	/* ADDW SP, SP, #imm	1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */
	DECODE_OR	(0xfbff8f00, 0xf20d0d00),
	/* SUBW SP, SP, #imm	1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */
	DECODE_EMULATEX	(0xfbff8f00, 0xf2ad0d00, PROBES_T32_ADDWSUBW,
						 REGS(SP, 0, SP, 0, 0)),

	/* ADDW			1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_OR	(0xfbf08000, 0xf2000000),
	/* SUBW			1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbf08000, 0xf2a00000, PROBES_T32_ADDWSUBW,
						 REGS(NOPCX, 0, NOSPPC, 0, 0)),

	/* MOVW			1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */
	/* MOVT			1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb708000, 0xf2400000, PROBES_T32_MOVW,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/* SSAT16		1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */
	/* SSAT			1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */
	/* USAT16		1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */
	/* USAT			1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb508000, 0xf3000000, PROBES_T32_SAT,
						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),

	/* SBFX			1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */
	/* UBFX			1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb708000, 0xf3400000, PROBES_T32_BITFIELD,
						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),

	/* BFC			1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbff8000, 0xf36f0000, PROBES_T32_BITFIELD,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/* BFI			1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbf08000, 0xf3600000, PROBES_T32_BITFIELD,
						 REGS(NOSPPCX, 0, NOSPPC, 0, 0)),

	DECODE_END
};

static const union decode_item t32_table_1111_0xxx___1[] = {
	/* Branches and miscellaneous control */

	/* YIELD		1111 0011 1010 xxxx 10x0 x000 0000 0001 */
	DECODE_OR	(0xfff0d7ff, 0xf3a08001),
	/* SEV			1111 0011 1010 xxxx 10x0 x000 0000 0100 */
	DECODE_EMULATE	(0xfff0d7ff, 0xf3a08004, PROBES_T32_SEV),
	/* NOP			1111 0011 1010 xxxx 10x0 x000 0000 0000 */
	/* WFE			1111 0011 1010 xxxx 10x0 x000 0000 0010 */
	/* WFI			1111 0011 1010 xxxx 10x0 x000 0000 0011 */
	DECODE_SIMULATE	(0xfff0d7fc, 0xf3a08000, PROBES_T32_WFE),

	/* MRS Rd, CPSR		1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */
	DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, PROBES_T32_MRS,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/*
	 * Unsupported instructions
	 *			1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx
	 *
	 * MSR			1111 0011 100x xxxx 10x0 xxxx xxxx xxxx
	 * DBG hint		1111 0011 1010 xxxx 10x0 x000 1111 xxxx
	 * Unallocated hints	1111 0011 1010 xxxx 10x0 x000 xxxx xxxx
	 * CPS			1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx
	 * CLREX/DSB/DMB/ISB	1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx
	 * BXJ			1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx
	 * SUBS PC,LR,#<imm8>	1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx
	 * MRS Rd, SPSR		1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx
	 * SMC			1111 0111 1111 xxxx 1000 xxxx xxxx xxxx
	 * UNDEFINED		1111 0111 1111 xxxx 1010 xxxx xxxx xxxx
	 * ???			1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx
	 */
	DECODE_REJECT	(0xfb80d000, 0xf3808000),

	/* Bcc			1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */
	DECODE_CUSTOM	(0xf800d000, 0xf0008000, PROBES_T32_BRANCH_COND),

	/* BLX			1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */
	DECODE_OR	(0xf800d001, 0xf000c000),
	/* B			1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */
	/* BL			1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */
	DECODE_SIMULATE	(0xf8009000, 0xf0009000, PROBES_T32_BRANCH),

	DECODE_END
};

static const union decode_item t32_table_1111_100x_x0x1__1111[] = {
	/* Memory hints */

	/* PLD (literal)	1111 1000 x001 1111 1111 xxxx xxxx xxxx */
	/* PLI (literal)	1111 1001 x001 1111 1111 xxxx xxxx xxxx */
	DECODE_SIMULATE	(0xfe7ff000, 0xf81ff000, PROBES_T32_PLDI),

	/* PLD{W} (immediate)	1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */
	DECODE_OR	(0xffd0f000, 0xf890f000),
	/* PLD{W} (immediate)	1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */
	DECODE_OR	(0xffd0ff00, 0xf810fc00),
	/* PLI (immediate)	1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */
	DECODE_OR	(0xfff0f000, 0xf990f000),
	/* PLI (immediate)	1111 1001 0001 xxxx 1111 1100 xxxx xxxx */
	DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, PROBES_T32_PLDI,
						 REGS(NOPCX, 0, 0, 0, 0)),

	/* PLD{W} (register)	1111 1000 00x1 xxxx 1111 0000 00xx xxxx */
	DECODE_OR	(0xffd0ffc0, 0xf810f000),
	/* PLI (register)	1111 1001 0001 xxxx 1111 0000 00xx xxxx */
	DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, PROBES_T32_PLDI,
						 REGS(NOPCX, 0, 0, 0, NOSPPC)),

	/* Other unallocated instructions...				*/
	DECODE_END
};

static const union decode_item t32_table_1111_100x[] = {
	/* Store/Load single data item */

	/* ???			1111 100x x11x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe600000, 0xf8600000),

	/* ???			1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfff00000, 0xf9500000),

	/* ???			1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */
	DECODE_REJECT	(0xfe800d00, 0xf8000800),

	/* STRBT		1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */
	/* STRHT		1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */
	/* STRT			1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */
	/* LDRBT		1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */
	/* LDRSBT		1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */
	/* LDRHT		1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */
	/* LDRSHT		1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */
	/* LDRT			1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */
	DECODE_REJECT	(0xfe800f00, 0xf8000e00),

	/* STR{,B,H} Rn,[PC...]	1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xff1f0000, 0xf80f0000),

	/* STR{,B,H} PC,[Rn...]	1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */
	DECODE_REJECT	(0xff10f000, 0xf800f000),

	/* LDR (literal)	1111 1000 x101 1111 xxxx xxxx xxxx xxxx */
	DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, PROBES_T32_LDR_LIT,
						 REGS(PC, ANY, 0, 0, 0)),

	/* STR (immediate)	1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */
	/* LDR (immediate)	1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */
	DECODE_OR	(0xffe00800, 0xf8400800),
	/* STR (immediate)	1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */
	/* LDR (immediate)	1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xffe00000, 0xf8c00000, PROBES_T32_LDRSTR,
						 REGS(NOPCX, ANY, 0, 0, 0)),

	/* STR (register)	1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
	/* LDR (register)	1111 1000 0101 xxxx xxxx 0000 00xx xxxx */
	DECODE_EMULATEX	(0xffe00fc0, 0xf8400000, PROBES_T32_LDRSTR,
						 REGS(NOPCX, ANY, 0, 0, NOSPPC)),

	/* LDRB (literal)	1111 1000 x001 1111 xxxx xxxx xxxx xxxx */
	/* LDRSB (literal)	1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
	/* LDRH (literal)	1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
	/* LDRSH (literal)	1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
	DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, PROBES_T32_LDR_LIT,
						 REGS(PC, NOSPPCX, 0, 0, 0)),

	/* STRB (immediate)	1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
	/* STRH (immediate)	1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRB (immediate)	1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRSB (immediate)	1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRH (immediate)	1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRSH (immediate)	1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */
	DECODE_OR	(0xfec00800, 0xf8000800),
	/* STRB (immediate)	1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */
	/* STRH (immediate)	1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */
	/* LDRB (immediate)	1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */
	/* LDRSB (immediate)	1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */
	/* LDRH (immediate)	1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */
	/* LDRSH (immediate)	1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfec00000, 0xf8800000, PROBES_T32_LDRSTR,
						 REGS(NOPCX, NOSPPCX, 0, 0, 0)),

	/* STRB (register)	1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
	/* STRH (register)	1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
	/* LDRB (register)	1111 1000 0001 xxxx xxxx 0000 00xx xxxx */
	/* LDRSB (register)	1111 1001 0001 xxxx xxxx 0000 00xx xxxx */
	/* LDRH (register)	1111 1000 0011 xxxx xxxx 0000 00xx xxxx */
	/* LDRSH (register)	1111 1001 0011 xxxx xxxx 0000 00xx xxxx */
	DECODE_EMULATEX	(0xfe800fc0, 0xf8000000, PROBES_T32_LDRSTR,
						 REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)),

	/* Other unallocated instructions...				*/
	DECODE_END
};

static const union decode_item t32_table_1111_1010___1111[] = {
	/* Data-processing (register) */

	/* ???			1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */
	DECODE_REJECT	(0xffe0f080, 0xfa60f080),

	/* SXTH			1111 1010 0000 1111 1111 xxxx 1xxx xxxx */
	/* UXTH			1111 1010 0001 1111 1111 xxxx 1xxx xxxx */
	/* SXTB16		1111 1010 0010 1111 1111 xxxx 1xxx xxxx */
	/* UXTB16		1111 1010 0011 1111 1111 xxxx 1xxx xxxx */
	/* SXTB			1111 1010 0100 1111 1111 xxxx 1xxx xxxx */
	/* UXTB			1111 1010 0101 1111 1111 xxxx 1xxx xxxx */
	DECODE_EMULATEX	(0xff8ff080, 0xfa0ff080, PROBES_T32_SIGN_EXTEND,
						 REGS(0, 0, NOSPPC, 0, NOSPPC)),

	/* ???			1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */
	DECODE_REJECT	(0xff80f0b0, 0xfa80f030),
	/* ???			1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */
	DECODE_REJECT	(0xffb0f080, 0xfab0f000),

	/* SADD16		1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */
	/* SASX			1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */
	/* SSAX			1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */
	/* SSUB16		1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */
	/* SADD8		1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */
	/* SSUB8		1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */

	/* QADD16		1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */
	/* QASX			1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */
	/* QSAX			1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */
	/* QSUB16		1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */
	/* QADD8		1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */
	/* QSUB8		1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */

	/* SHADD16		1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */
	/* SHASX		1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */
	/* SHSAX		1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */
	/* SHSUB16		1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */
	/* SHADD8		1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */
	/* SHSUB8		1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */

	/* UADD16		1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */
	/* UASX			1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */
	/* USAX			1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */
	/* USUB16		1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */
	/* UADD8		1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */
	/* USUB8		1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */

	/* UQADD16		1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */
	/* UQASX		1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */
	/* UQSAX		1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */
	/* UQSUB16		1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */
	/* UQADD8		1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */
	/* UQSUB8		1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */

	/* UHADD16		1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */
	/* UHASX		1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */
	/* UHSAX		1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */
	/* UHSUB16		1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */
	/* UHADD8		1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */
	/* UHSUB8		1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */
	DECODE_OR	(0xff80f080, 0xfa80f000),

	/* SXTAH		1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */
	/* UXTAH		1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */
	/* SXTAB16		1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */
	/* UXTAB16		1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */
	/* SXTAB		1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */
	/* UXTAB		1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */
	DECODE_OR	(0xff80f080, 0xfa00f080),

	/* QADD			1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */
	/* QDADD		1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */
	/* QSUB			1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */
	/* QDSUB		1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */
	DECODE_OR	(0xfff0f0c0, 0xfa80f080),

	/* SEL			1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
	DECODE_OR	(0xfff0f0f0, 0xfaa0f080),

	/* LSL			1111 1010 000x xxxx 1111 xxxx 0000 xxxx */
	/* LSR			1111 1010 001x xxxx 1111 xxxx 0000 xxxx */
	/* ASR			1111 1010 010x xxxx 1111 xxxx 0000 xxxx */
	/* ROR			1111 1010 011x xxxx 1111 xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff80f0f0, 0xfa00f000, PROBES_T32_MEDIA,
						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),

	/* CLZ			1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
	DECODE_OR	(0xfff0f0f0, 0xfab0f080),

	/* REV			1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */
	/* REV16		1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */
	/* RBIT			1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */
	/* REVSH		1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */
	DECODE_EMULATEX	(0xfff0f0c0, 0xfa90f080, PROBES_T32_REVERSE,
						 REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)),

	/* Other unallocated instructions...				*/
	DECODE_END
};

static const union decode_item t32_table_1111_1011_0[] = {
	/* Multiply, multiply accumulate, and absolute difference */

	/* ???			1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */
	DECODE_REJECT	(0xfff0f0f0, 0xfb00f010),
	/* ???			1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */
	DECODE_REJECT	(0xfff0f0f0, 0xfb70f010),

	/* SMULxy		1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */
	DECODE_OR	(0xfff0f0c0, 0xfb10f000),
	/* MUL			1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */
	/* SMUAD{X}		1111 1011 0010 xxxx 1111 xxxx 000x xxxx */
	/* SMULWy		1111 1011 0011 xxxx 1111 xxxx 000x xxxx */
	/* SMUSD{X}		1111 1011 0100 xxxx 1111 xxxx 000x xxxx */
	/* SMMUL{R}		1111 1011 0101 xxxx 1111 xxxx 000x xxxx */
	/* USAD8		1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff80f0e0, 0xfb00f000, PROBES_T32_MUL_ADD,
						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),

	/* ???			1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */
	DECODE_REJECT	(0xfff000f0, 0xfb700010),

	/* SMLAxy		1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */
	DECODE_OR	(0xfff000c0, 0xfb100000),
	/* MLA			1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */
	/* MLS			1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */
	/* SMLAD{X}		1111 1011 0010 xxxx xxxx xxxx 000x xxxx */
	/* SMLAWy		1111 1011 0011 xxxx xxxx xxxx 000x xxxx */
	/* SMLSD{X}		1111 1011 0100 xxxx xxxx xxxx 000x xxxx */
	/* SMMLA{R}		1111 1011 0101 xxxx xxxx xxxx 000x xxxx */
	/* SMMLS{R}		1111 1011 0110 xxxx xxxx xxxx 000x xxxx */
	/* USADA8		1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff8000c0, 0xfb000000, PROBES_T32_MUL_ADD2,
						 REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)),

	/* Other unallocated instructions...				*/
	DECODE_END
};

static const union decode_item t32_table_1111_1011_1[] = {
	/* Long multiply, long multiply accumulate, and divide */

	/* UMAAL		1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */
	DECODE_OR	(0xfff000f0, 0xfbe00060),
	/* SMLALxy		1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */
	DECODE_OR	(0xfff000c0, 0xfbc00080),
	/* SMLALD{X}		1111 1011 1100 xxxx xxxx xxxx 110x xxxx */
	/* SMLSLD{X}		1111 1011 1101 xxxx xxxx xxxx 110x xxxx */
	DECODE_OR	(0xffe000e0, 0xfbc000c0),
	/* SMULL		1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */
	/* UMULL		1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */
	/* SMLAL		1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */
	/* UMLAL		1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff9000f0, 0xfb800000, PROBES_T32_MUL_ADD_LONG,
						 REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)),

	/* SDIV			1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */
	/* UDIV			1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */
	/* Other unallocated instructions...				*/
	DECODE_END
};

/* Top-level decode table for 32-bit Thumb instructions. */
const union decode_item probes_decode_thumb32_table[] = {

	/*
	 * Load/store multiple instructions
	 *			1110 100x x0xx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx),

	/*
	 * Load/store dual, load/store exclusive, table branch
	 *			1110 100x x1xx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx),

	/*
	 * Data-processing (shifted register)
	 *			1110 101x xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe000000, 0xea000000, t32_table_1110_101x),

	/*
	 * Coprocessor instructions
	 *			1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_REJECT	(0xfc000000, 0xec000000),

	/*
	 * Data-processing (modified immediate)
	 *			1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfa008000, 0xf0000000, t32_table_1111_0x0x___0),

	/*
	 * Data-processing (plain binary immediate)
	 *			1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfa008000, 0xf2000000, t32_table_1111_0x1x___0),

	/*
	 * Branches and miscellaneous control
	 *			1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xf8008000, 0xf0008000, t32_table_1111_0xxx___1),

	/*
	 * Advanced SIMD element or structure load/store instructions
	 *			1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_REJECT	(0xff100000, 0xf9000000),

	/*
	 * Memory hints
	 *			1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111),

	/*
	 * Store single data item
	 *			1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx
	 * Load single data items
	 *			1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe000000, 0xf8000000, t32_table_1111_100x),

	/*
	 * Data-processing (register)
	 *			1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xff00f000, 0xfa00f000, t32_table_1111_1010___1111),

	/*
	 * Multiply, multiply accumulate, and absolute difference
	 *			1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xff800000, 0xfb000000, t32_table_1111_1011_0),

	/*
	 * Long multiply, long multiply accumulate, and divide
	 *			1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xff800000, 0xfb800000, t32_table_1111_1011_1),

	/*
	 * Coprocessor instructions
	 *			1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_END
};
#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
EXPORT_SYMBOL_GPL(probes_decode_thumb32_table);
#endif

static const union decode_item t16_table_1011[] = {
	/* Miscellaneous 16-bit instructions		    */

	/* ADD (SP plus immediate)	1011 0000 0xxx xxxx */
	/* SUB (SP minus immediate)	1011 0000 1xxx xxxx */
	DECODE_SIMULATE	(0xff00, 0xb000, PROBES_T16_ADD_SP),

	/* CBZ				1011 00x1 xxxx xxxx */
	/* CBNZ				1011 10x1 xxxx xxxx */
	DECODE_SIMULATE	(0xf500, 0xb100, PROBES_T16_CBZ),

	/* SXTH				1011 0010 00xx xxxx */
	/* SXTB				1011 0010 01xx xxxx */
	/* UXTH				1011 0010 10xx xxxx */
	/* UXTB				1011 0010 11xx xxxx */
	/* REV				1011 1010 00xx xxxx */
	/* REV16			1011 1010 01xx xxxx */
	/* ???				1011 1010 10xx xxxx */
	/* REVSH			1011 1010 11xx xxxx */
	DECODE_REJECT	(0xffc0, 0xba80),
	DECODE_EMULATE	(0xf500, 0xb000, PROBES_T16_SIGN_EXTEND),

	/* PUSH				1011 010x xxxx xxxx */
	DECODE_CUSTOM	(0xfe00, 0xb400, PROBES_T16_PUSH),
	/* POP				1011 110x xxxx xxxx */
	DECODE_CUSTOM	(0xfe00, 0xbc00, PROBES_T16_POP),

	/*
	 * If-Then, and hints
	 *				1011 1111 xxxx xxxx
	 */

	/* YIELD			1011 1111 0001 0000 */
	DECODE_OR	(0xffff, 0xbf10),
	/* SEV				1011 1111 0100 0000 */
	DECODE_EMULATE	(0xffff, 0xbf40, PROBES_T16_SEV),
	/* NOP				1011 1111 0000 0000 */
	/* WFE				1011 1111 0010 0000 */
	/* WFI				1011 1111 0011 0000 */
	DECODE_SIMULATE	(0xffcf, 0xbf00, PROBES_T16_WFE),
	/* Unassigned hints		1011 1111 xxxx 0000 */
	DECODE_REJECT	(0xff0f, 0xbf00),
	/* IT				1011 1111 xxxx xxxx */
	DECODE_CUSTOM	(0xff00, 0xbf00, PROBES_T16_IT),

	/* SETEND			1011 0110 010x xxxx */
	/* CPS				1011 0110 011x xxxx */
	/* BKPT				1011 1110 xxxx xxxx */
	/* And unallocated instructions...		    */
	DECODE_END
};

/* Top-level decode table for 16-bit Thumb instructions. */
const union decode_item probes_decode_thumb16_table[] = {

	/*
	 * Shift (immediate), add, subtract, move, and compare
	 *				00xx xxxx xxxx xxxx
	 */

	/* CMP (immediate)		0010 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xf800, 0x2800, PROBES_T16_CMP),

	/* ADD (register)		0001 100x xxxx xxxx */
	/* SUB (register)		0001 101x xxxx xxxx */
	/* LSL (immediate)		0000 0xxx xxxx xxxx */
	/* LSR (immediate)		0000 1xxx xxxx xxxx */
	/* ASR (immediate)		0001 0xxx xxxx xxxx */
	/* ADD (immediate, Thumb)	0001 110x xxxx xxxx */
	/* SUB (immediate, Thumb)	0001 111x xxxx xxxx */
	/* MOV (immediate)		0010 0xxx xxxx xxxx */
	/* ADD (immediate, Thumb)	0011 0xxx xxxx xxxx */
	/* SUB (immediate, Thumb)	0011 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xc000, 0x0000, PROBES_T16_ADDSUB),

	/*
	 * 16-bit Thumb data-processing instructions
	 *				0100 00xx xxxx xxxx
	 */

	/* TST (register)		0100 0010 00xx xxxx */
	DECODE_EMULATE	(0xffc0, 0x4200, PROBES_T16_CMP),
	/* CMP (register)		0100 0010 10xx xxxx */
	/* CMN (register)		0100 0010 11xx xxxx */
	DECODE_EMULATE	(0xff80, 0x4280, PROBES_T16_CMP),
	/* AND (register)		0100 0000 00xx xxxx */
	/* EOR (register)		0100 0000 01xx xxxx */
	/* LSL (register)		0100 0000 10xx xxxx */
	/* LSR (register)		0100 0000 11xx xxxx */
	/* ASR (register)		0100 0001 00xx xxxx */
	/* ADC (register)		0100 0001 01xx xxxx */
	/* SBC (register)		0100 0001 10xx xxxx */
	/* ROR (register)		0100 0001 11xx xxxx */
	/* RSB (immediate)		0100 0010 01xx xxxx */
	/* ORR (register)		0100 0011 00xx xxxx */
	/* MUL				0100 0011 00xx xxxx */
	/* BIC (register)		0100 0011 10xx xxxx */
	/* MVN (register)		0100 0011 10xx xxxx */
	DECODE_EMULATE	(0xfc00, 0x4000, PROBES_T16_LOGICAL),

	/*
	 * Special data instructions and branch and exchange
	 *				0100 01xx xxxx xxxx
	 */

	/* BLX pc			0100 0111 1111 1xxx */
	DECODE_REJECT	(0xfff8, 0x47f8),

	/* BX (register)		0100 0111 0xxx xxxx */
	/* BLX (register)		0100 0111 1xxx xxxx */
	DECODE_SIMULATE	(0xff00, 0x4700, PROBES_T16_BLX),

	/* ADD pc, pc			0100 0100 1111 1111 */
	DECODE_REJECT	(0xffff, 0x44ff),

	/* ADD (register)		0100 0100 xxxx xxxx */
	/* CMP (register)		0100 0101 xxxx xxxx */
	/* MOV (register)		0100 0110 xxxx xxxx */
	DECODE_CUSTOM	(0xfc00, 0x4400, PROBES_T16_HIREGOPS),

	/*
	 * Load from Literal Pool
	 * LDR (literal)		0100 1xxx xxxx xxxx
	 */
	DECODE_SIMULATE	(0xf800, 0x4800, PROBES_T16_LDR_LIT),

	/*
	 * 16-bit Thumb Load/store instructions
	 *				0101 xxxx xxxx xxxx
	 *				011x xxxx xxxx xxxx
	 *				100x xxxx xxxx xxxx
	 */

	/* STR (register)		0101 000x xxxx xxxx */
	/* STRH (register)		0101 001x xxxx xxxx */
	/* STRB (register)		0101 010x xxxx xxxx */
	/* LDRSB (register)		0101 011x xxxx xxxx */
	/* LDR (register)		0101 100x xxxx xxxx */
	/* LDRH (register)		0101 101x xxxx xxxx */
	/* LDRB (register)		0101 110x xxxx xxxx */
	/* LDRSH (register)		0101 111x xxxx xxxx */
	/* STR (immediate, Thumb)	0110 0xxx xxxx xxxx */
	/* LDR (immediate, Thumb)	0110 1xxx xxxx xxxx */
	/* STRB (immediate, Thumb)	0111 0xxx xxxx xxxx */
	/* LDRB (immediate, Thumb)	0111 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xc000, 0x4000, PROBES_T16_LDRHSTRH),
	/* STRH (immediate, Thumb)	1000 0xxx xxxx xxxx */
	/* LDRH (immediate, Thumb)	1000 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xf000, 0x8000, PROBES_T16_LDRHSTRH),
	/* STR (immediate, Thumb)	1001 0xxx xxxx xxxx */
	/* LDR (immediate, Thumb)	1001 1xxx xxxx xxxx */
	DECODE_SIMULATE	(0xf000, 0x9000, PROBES_T16_LDRSTR),

	/*
	 * Generate PC-/SP-relative address
	 * ADR (literal)		1010 0xxx xxxx xxxx
	 * ADD (SP plus immediate)	1010 1xxx xxxx xxxx
	 */
	DECODE_SIMULATE	(0xf000, 0xa000, PROBES_T16_ADR),

	/*
	 * Miscellaneous 16-bit instructions
	 *				1011 xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xf000, 0xb000, t16_table_1011),

	/* STM				1100 0xxx xxxx xxxx */
	/* LDM				1100 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xf000, 0xc000, PROBES_T16_LDMSTM),

	/*
	 * Conditional branch, and Supervisor Call
	 */

	/* Permanently UNDEFINED	1101 1110 xxxx xxxx */
	/* SVC				1101 1111 xxxx xxxx */
	DECODE_REJECT	(0xfe00, 0xde00),

	/* Conditional branch		1101 xxxx xxxx xxxx */
	DECODE_CUSTOM	(0xf000, 0xd000, PROBES_T16_BRANCH_COND),

	/*
	 * Unconditional branch
	 * B				1110 0xxx xxxx xxxx
	 */
	DECODE_SIMULATE	(0xf800, 0xe000, PROBES_T16_BRANCH),

	DECODE_END
};
#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
EXPORT_SYMBOL_GPL(probes_decode_thumb16_table);
#endif

/*
 * Condition check for a probed Thumb instruction: inside an IT block the
 * current IT-state condition decides whether the instruction executes;
 * outside an IT block Thumb instructions are unconditional.
 */
static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
{
	if (unlikely(in_it_block(cpsr)))
		return probes_condition_checks[current_cond(cpsr)](cpsr);
	return true;
}

/*
 * Single-step a 16-bit Thumb instruction: advance PC past it, run the
 * decoded handler, then advance the IT state in the CPSR.
 */
static void __kprobes thumb16_singlestep(probes_opcode_t opcode,
		struct arch_probes_insn *asi,
		struct pt_regs *regs)
{
	regs->ARM_pc += 2;
	asi->insn_handler(opcode, asi, regs);
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
}

/* As thumb16_singlestep(), but for a 4-byte (32-bit) Thumb instruction. */
static void __kprobes thumb32_singlestep(probes_opcode_t opcode,
		struct arch_probes_insn *asi,
		struct pt_regs *regs)
{
	regs->ARM_pc += 4;
	asi->insn_handler(opcode, asi, regs);
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
}

/*
 * Decode a 16-bit Thumb instruction for probing: install the Thumb
 * single-step/condition-check callbacks, then run the generic decoder
 * over the 16-bit table.
 */
enum probes_insn __kprobes
thumb16_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
			   bool emulate, const union decode_action *actions,
			   const struct decode_checker *checkers[])
{
	asi->insn_singlestep = thumb16_singlestep;
	asi->insn_check_cc = thumb_check_cc;
	return probes_decode_insn(insn, asi, probes_decode_thumb16_table, true,
				  emulate, actions, checkers);
}

/* As thumb16_probes_decode_insn(), but using the 32-bit Thumb table. */
enum probes_insn __kprobes
thumb32_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
			   bool emulate, const union decode_action *actions,
			   const struct decode_checker *checkers[])
{
	asi->insn_singlestep = thumb32_singlestep;
	asi->insn_check_cc = thumb_check_cc;
	return probes_decode_insn(insn, asi, probes_decode_thumb32_table, true,
				  emulate, actions, checkers);
}
linux-master
arch/arm/probes/decode-thumb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/wait.h>
#include <linux/uprobes.h>
#include <linux/module.h>

#include "../decode.h"
#include "../decode-arm.h"
#include "core.h"

/*
 * Rewrite the out-of-line copy of an instruction so that it no longer
 * references PC.  @oregs describes, 4 bits per register field, which
 * nibbles of the opcode are register operands (REG_TYPE_NONE fields are
 * skipped).  Returns:
 *   15  - the instruction does not use PC at all; nothing was changed.
 *   -1  - every register is already used, so no substitute is available.
 *   else the register number chosen to stand in for PC; every PC operand
 *        in *pinsn has been replaced with it.
 */
static int uprobes_substitute_pc(unsigned long *pinsn, u32 oregs)
{
	probes_opcode_t insn = __mem_to_opcode_arm(*pinsn);
	probes_opcode_t temp;
	probes_opcode_t mask;
	int freereg;
	u32 free = 0xffff;	/* bitmask of registers NOT used by the insn */
	u32 regs;

	/* Clear the 'free' bit of every register the instruction uses. */
	for (regs = oregs; regs; regs >>= 4, insn >>= 4) {
		if ((regs & 0xf) == REG_TYPE_NONE)
			continue;
		free &= ~(1 << (insn & 0xf));
	}

	/* No PC, no problem */
	if (free & (1 << 15))
		return 15;

	if (!free)
		return -1;

	/*
	 * fls instead of ffs ensures that for "ldrd r0, r1, [pc]" we would
	 * pick LR instead of R1.
	 */
	freereg = free = fls(free) - 1;

	/* Second pass: replace each PC operand with the chosen register. */
	temp = __mem_to_opcode_arm(*pinsn);
	insn = temp;
	regs = oregs;
	mask = 0xf;

	for (; regs; regs >>= 4, mask <<= 4, free <<= 4, temp >>= 4) {
		if ((regs & 0xf) == REG_TYPE_NONE)
			continue;
		if ((temp & 0xf) != 15)
			continue;
		insn &= ~mask;
		insn |= free & mask;
	}

	*pinsn = __opcode_to_mem_arm(insn);
	return freereg;
}

/*
 * Pre-handler: save the substitute register and load it with the value
 * PC would have had at the original instruction (ARM reads PC as
 * instruction address + 8).
 */
static void uprobe_set_pc(struct arch_uprobe *auprobe,
			  struct arch_uprobe_task *autask,
			  struct pt_regs *regs)
{
	u32 pcreg = auprobe->pcreg;

	autask->backup = regs->uregs[pcreg];
	regs->uregs[pcreg] = regs->ARM_pc + 8;
}

/* Post-handler: restore the substitute register from the saved backup. */
static void uprobe_unset_pc(struct arch_uprobe *auprobe,
			    struct arch_uprobe_task *autask,
			    struct pt_regs *regs)
{
	/* PC will be taken care of by common code */
	regs->uregs[auprobe->pcreg] = autask->backup;
}

/*
 * Post-handler for ALU instructions that wrote to PC: propagate the
 * computed value into PC via alu_write_pc(), then restore the register.
 */
static void uprobe_aluwrite_pc(struct arch_uprobe *auprobe,
			       struct arch_uprobe_task *autask,
			       struct pt_regs *regs)
{
	u32 pcreg = auprobe->pcreg;

	alu_write_pc(regs->uregs[pcreg], regs);
	regs->uregs[pcreg] = autask->backup;
}

/*
 * Post-handler for load instructions that wrote to PC: propagate the
 * loaded value into PC via load_write_pc(), then restore the register.
 */
static void uprobe_write_pc(struct arch_uprobe *auprobe,
			    struct arch_uprobe_task *autask,
			    struct pt_regs *regs)
{
	u32 pcreg = auprobe->pcreg;

	load_write_pc(regs->uregs[pcreg], regs);
	regs->uregs[pcreg] = autask->backup;
}

/*
 * Decode callback for instructions that may read PC but never write it:
 * substitute PC with a free register and install the set/unset handlers.
 */
enum probes_insn
decode_pc_ro(probes_opcode_t insn, struct arch_probes_insn *asi,
	     const struct decode_header *d)
{
	struct arch_uprobe *auprobe = container_of(asi, struct arch_uprobe, asi);
	struct decode_emulate *decode = (struct decode_emulate *) d;
	u32 regs = decode->header.type_regs.bits >> DECODE_TYPE_BITS;
	int reg;

	reg = uprobes_substitute_pc(&auprobe->ixol[0], regs);
	if (reg == 15)
		return INSN_GOOD;

	if (reg == -1)
		return INSN_REJECTED;

	auprobe->pcreg = reg;
	auprobe->prehandler = uprobe_set_pc;
	auprobe->posthandler = uprobe_unset_pc;

	return INSN_GOOD;
}

/*
 * As decode_pc_ro(), but if the destination register (bits 15:12) is PC
 * the value must be written back to PC afterwards; @alu selects the
 * ALU-style or load-style write-back handler.
 */
enum probes_insn
decode_wb_pc(probes_opcode_t insn, struct arch_probes_insn *asi,
	     const struct decode_header *d, bool alu)
{
	struct arch_uprobe *auprobe = container_of(asi, struct arch_uprobe, asi);
	enum probes_insn ret = decode_pc_ro(insn, asi, d);

	if (((insn >> 12) & 0xf) == 15)
		auprobe->posthandler = alu ? uprobe_aluwrite_pc
					   : uprobe_write_pc;

	return ret;
}

/* Decode callback for data-processing instructions (ALU write-back). */
enum probes_insn
decode_rd12rn16rm0rs8_rwflags(probes_opcode_t insn,
			      struct arch_probes_insn *asi,
			      const struct decode_header *d)
{
	return decode_wb_pc(insn, asi, d, true);
}

/* Decode callback for load instructions (load-style write-back). */
enum probes_insn
decode_ldr(probes_opcode_t insn, struct arch_probes_insn *asi,
	   const struct decode_header *d)
{
	return decode_wb_pc(insn, asi, d, false);
}

/*
 * Decode LDM/STM for uprobes.  Instructions that don't touch PC pass
 * through unchanged; those that include PC in the register list are
 * rewritten to use LR instead (possible only when LR itself is unused),
 * with pre/post handlers fixing up the substituted register.  A PC base
 * register cannot be substituted, so such instructions are rejected.
 */
enum probes_insn
uprobe_decode_ldmstm(probes_opcode_t insn,
		     struct arch_probes_insn *asi,
		     const struct decode_header *d)
{
	struct arch_uprobe *auprobe = container_of(asi, struct arch_uprobe, asi);
	unsigned reglist = insn & 0xffff;
	int rn = (insn >> 16) & 0xf;
	int lbit = insn & (1 << 20);
	unsigned used = reglist | (1 << rn);

	if (rn == 15)
		return INSN_REJECTED;

	if (!(used & (1 << 15)))
		return INSN_GOOD;

	if (used & (1 << 14))
		return INSN_REJECTED;

	/* Use LR instead of PC */
	insn ^= 0xc000;

	auprobe->pcreg = 14;
	auprobe->ixol[0] = __opcode_to_mem_arm(insn);

	auprobe->prehandler = uprobe_set_pc;
	if (lbit)
		auprobe->posthandler = uprobe_write_pc;
	else
		auprobe->posthandler = uprobe_unset_pc;

	return INSN_GOOD;
}

/*
 * Action table mapping each probe action class to either a simulation
 * handler (run in-kernel) or a decoder that patches the out-of-line slot.
 * Entries using probes_simulate_nop execute the real instruction in the
 * XOL slot unmodified.
 */
const union decode_action uprobes_probes_actions[] = {
	[PROBES_PRELOAD_IMM] = {.handler = probes_simulate_nop},
	[PROBES_PRELOAD_REG] = {.handler = probes_simulate_nop},
	[PROBES_BRANCH_IMM] = {.handler = simulate_blx1},
	[PROBES_MRS] = {.handler = simulate_mrs},
	[PROBES_BRANCH_REG] = {.handler = simulate_blx2bx},
	[PROBES_CLZ] = {.handler = probes_simulate_nop},
	[PROBES_SATURATING_ARITHMETIC] = {.handler = probes_simulate_nop},
	[PROBES_MUL1] = {.handler = probes_simulate_nop},
	[PROBES_MUL2] = {.handler = probes_simulate_nop},
	[PROBES_SWP] = {.handler = probes_simulate_nop},
	[PROBES_LDRSTRD] = {.decoder = decode_pc_ro},
	[PROBES_LOAD_EXTRA] = {.decoder = decode_pc_ro},
	[PROBES_LOAD] = {.decoder = decode_ldr},
	[PROBES_STORE_EXTRA] = {.decoder = decode_pc_ro},
	[PROBES_STORE] = {.decoder = decode_pc_ro},
	[PROBES_MOV_IP_SP] = {.handler = simulate_mov_ipsp},
	[PROBES_DATA_PROCESSING_REG] = {
		.decoder = decode_rd12rn16rm0rs8_rwflags},
	[PROBES_DATA_PROCESSING_IMM] = {
		.decoder = decode_rd12rn16rm0rs8_rwflags},
	[PROBES_MOV_HALFWORD] = {.handler = probes_simulate_nop},
	[PROBES_SEV] = {.handler = probes_simulate_nop},
	[PROBES_WFE] = {.handler = probes_simulate_nop},
	[PROBES_SATURATE] = {.handler = probes_simulate_nop},
	[PROBES_REV] = {.handler = probes_simulate_nop},
	[PROBES_MMI] = {.handler = probes_simulate_nop},
	[PROBES_PACK] = {.handler = probes_simulate_nop},
	[PROBES_EXTEND] = {.handler = probes_simulate_nop},
	[PROBES_EXTEND_ADD] = {.handler = probes_simulate_nop},
	[PROBES_MUL_ADD_LONG] = {.handler = probes_simulate_nop},
	[PROBES_MUL_ADD] = {.handler = probes_simulate_nop},
	[PROBES_BITFIELD] = {.handler = probes_simulate_nop},
	[PROBES_BRANCH] = {.handler = simulate_bbl},
	[PROBES_LDMSTM] = {.decoder = uprobe_decode_ldmstm}
};
linux-master
arch/arm/probes/uprobes/actions-arm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
 */

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>

#include <asm/opcodes.h>
#include <asm/traps.h>

#include "../decode.h"
#include "../decode-arm.h"
#include "core.h"

/* Sentinel trap number marking "currently single-stepping a uprobe". */
#define UPROBE_TRAP_NR	UINT_MAX

/*
 * Breakpoint match ignores the condition field (top nibble) because the
 * planted breakpoint copies the original instruction's condition.
 */
bool is_swbp_insn(uprobe_opcode_t *insn)
{
	return (__mem_to_opcode_arm(*insn) & 0x0fffffff) ==
		(UPROBE_SWBP_ARM_INSN & 0x0fffffff);
}

/* Plant the per-probe breakpoint opcode prepared by analyze_insn(). */
int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
	     unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
		   __opcode_to_mem_arm(auprobe->bpinsn));
}

/*
 * If the probed instruction's condition fails, skip it entirely by
 * advancing PC past the 4-byte ARM instruction.
 */
bool arch_uprobe_ignore(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (!auprobe->asi.insn_check_cc(regs->ARM_cpsr)) {
		regs->ARM_pc += 4;
		return true;
	}

	return false;
}

/*
 * Simulate the instruction in the kernel instead of single-stepping it
 * out of line, when a simulation handler was selected at decode time.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probes_opcode_t opcode;

	if (!auprobe->simulate)
		return false;

	opcode = __mem_to_opcode_arm(*(unsigned int *) auprobe->insn);

	auprobe->asi.insn_singlestep(opcode, &auprobe->asi, regs);

	return true;
}

/* Redirect the function return (LR) to the uretprobe trampoline. */
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = regs->ARM_lr;
	/* Replace the return addr with trampoline addr */
	regs->ARM_lr = trampoline_vaddr;
	return orig_ret_vaddr;
}

/*
 * Analyze the instruction at @addr: prepare the out-of-line slot
 * (instruction + single-step breakpoint), decode it against the uprobes
 * action table, and build the per-probe breakpoint opcode that carries
 * the original instruction's condition field.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	unsigned int insn;
	unsigned int bpinsn;
	enum probes_insn ret;

	/* Thumb is not yet supported; ARM instructions are 4-byte aligned. */
	if (addr & 0x3)
		return -EINVAL;

	insn = __mem_to_opcode_arm(*(unsigned int *)auprobe->insn);
	auprobe->ixol[0] = __opcode_to_mem_arm(insn);
	auprobe->ixol[1] = __opcode_to_mem_arm(UPROBE_SS_ARM_INSN);

	ret = arm_probes_decode_insn(insn, &auprobe->asi, false,
				     uprobes_probes_actions, NULL);
	switch (ret) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
	default:
		break;
	}

	bpinsn = UPROBE_SWBP_ARM_INSN & 0x0fffffff;
	if (insn >= 0xe0000000)
		bpinsn |= 0xe0000000;  /* Unconditional instruction */
	else
		bpinsn |= insn & 0xf0000000;  /* Copy condition from insn */

	auprobe->bpinsn = bpinsn;

	return 0;
}

/*
 * Copy the out-of-line instruction into the XOL page and flush caches
 * so the user-space mapping sees coherent, executable code.
 */
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	preempt_disable();

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	flush_uprobe_xol_access(page, vaddr, dst, len);

	preempt_enable();

	kunmap_atomic(xol_page_kaddr);
}

/*
 * About to single-step out of line: run any prehandler (e.g. PC
 * substitution), tag the task with the sentinel trap number, and point
 * PC at the XOL slot.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->prehandler)
		auprobe->prehandler(auprobe, &utask->autask, regs);

	utask->autask.saved_trap_no = current->thread.trap_no;
	current->thread.trap_no = UPROBE_TRAP_NR;
	regs->ARM_pc = utask->xol_vaddr;

	return 0;
}

/*
 * Finished single-stepping: restore the trap number, advance PC past
 * the original instruction, and run any posthandler (PC write-back).
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);

	current->thread.trap_no = utask->autask.saved_trap_no;
	regs->ARM_pc = utask->vaddr + 4;

	if (auprobe->posthandler)
		auprobe->posthandler(auprobe, &utask->autask, regs);

	return 0;
}

/*
 * A trap_no other than the sentinel means the XOL instruction itself
 * faulted (e.g. a bad memory access) rather than hitting the SS insn.
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_no != UPROBE_TRAP_NR)
		return true;

	return false;
}

/* Abort an in-progress XOL step: restore state and rewind PC. */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_no = utask->autask.saved_trap_no;
	instruction_pointer_set(regs, utask->vaddr);
}

/* No die-notifier handling is needed; traps come in via undef hooks. */
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

/*
 * Undef-instruction hook shared by the breakpoint and single-step
 * opcodes; dispatches to the appropriate uprobes notifier.  The
 * condition field is masked off before comparing (see is_swbp_insn()).
 */
static int uprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned long flags;

	local_irq_save(flags);
	instr &= 0x0fffffff;
	if (instr == (UPROBE_SWBP_ARM_INSN & 0x0fffffff))
		uprobe_pre_sstep_notifier(regs);
	else if (instr == (UPROBE_SS_ARM_INSN & 0x0fffffff))
		uprobe_post_sstep_notifier(regs);
	local_irq_restore(flags);

	return 0;
}

unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

/* Hook for the planted breakpoint instruction (user mode, ARM state). */
static struct undef_hook uprobes_arm_break_hook = {
	.instr_mask = 0x0fffffff,
	.instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
	.cpsr_mask = (PSR_T_BIT | MODE_MASK),
	.cpsr_val = USR_MODE,
	.fn = uprobe_trap_handler,
};

/* Hook for the single-step completion instruction in the XOL slot. */
static struct undef_hook uprobes_arm_ss_hook = {
	.instr_mask = 0x0fffffff,
	.instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
	.cpsr_mask = (PSR_T_BIT | MODE_MASK),
	.cpsr_val = USR_MODE,
	.fn = uprobe_trap_handler,
};

static int arch_uprobes_init(void)
{
	register_undef_hook(&uprobes_arm_break_hook);
	register_undef_hook(&uprobes_arm_ss_hook);

	return 0;
}
device_initcall(arch_uprobes_init);
linux-master
arch/arm/probes/uprobes/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/checkers-thumb.c
 *
 * Copyright (C) 2014 Huawei Inc.
 */

#include <linux/kernel.h>
#include "../decode.h"
#include "../decode-thumb.h"
#include "checkers.h"

/*
 * Classify how a probed T32 store instruction uses stack space below SP,
 * by running the opcode through a small secondary decode table whose
 * actions set asi->stack_space (via stack_check_actions).
 */
static enum probes_insn __kprobes t32_check_stack(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	/*
	 * PROBES_T32_LDMSTM, PROBES_T32_LDRDSTRD and PROBES_T32_LDRSTR
	 * may get here. Simply mark all normal insns as STACK_USE_NONE.
	 */
	static const union decode_item table[] = {
		/*
		 * First, filter out all ldr insns to make our life easier.
		 * Following load insns may come here:
		 * LDM, LDRD, LDR.
		 * In T32 encoding, bit 20 is enough for distinguishing
		 * load and store. All load insns have this bit set, when
		 * all store insns have this bit clear.
		 */
		DECODE_CUSTOM	(0x00100000, 0x00100000, STACK_USE_NONE),

		/*
		 * Mark all 'STR{,B,H}, Rt, [Rn, Rm]' as STACK_USE_UNKNOWN
		 * if Rn or Rm is SP. T32 doesn't encode STRD.
		 */
		/*                                 xx | Rn | Rt |         | Rm |*/
		/* STR (register)	1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
		/* STRB (register)	1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
		/* STRH (register)	1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
		/* INVALID INSN		1111 1000 0110 xxxx xxxx 0000 00xx xxxx */
		/* By Introducing INVALID INSN, bit 21 and 22 can be ignored. */
		DECODE_OR	(0xff9f0fc0, 0xf80d0000),
		DECODE_CUSTOM	(0xff900fcf, 0xf800000d, STACK_USE_UNKNOWN),

		/*                                 xx | Rn | Rt | PUW| imm8 |*/
		/* STR (imm 8)		1111 1000 0100 1101 xxxx 110x xxxx xxxx */
		/* STRB (imm 8)		1111 1000 0000 1101 xxxx 110x xxxx xxxx */
		/* STRH (imm 8)		1111 1000 0010 1101 xxxx 110x xxxx xxxx */
		/* INVALID INSN		1111 1000 0110 1101 xxxx 110x xxxx xxxx */
		/* Only consider U == 0 and P == 1: strx rx, [sp, #-<imm>] */
		DECODE_CUSTOM	(0xff9f0e00, 0xf80d0c00, STACK_USE_FIXED_0XX),

		/* For STR{,B,H} (imm 12), offset is always positive, so ignore them. */

		/*                              P U W | Rn | Rt | Rt2| imm8 |*/
		/* STRD (immediate)	1110 1001 01x0 1101 xxxx xxxx xxxx xxxx */
		/*
		 * Only consider U == 0 and P == 1.
		 * Also note that STRD in T32 encoding is special:
		 * imm = ZeroExtend(imm8:'00', 32)
		 */
		DECODE_CUSTOM	(0xffdf0000, 0xe94d0000, STACK_USE_T32STRD),

		/*                                    | Rn | */
		/* STMDB		1110 1001 00x0 1101 xxxx xxxx xxxx xxxx */
		DECODE_CUSTOM	(0xffdf0000, 0xe90d0000, STACK_USE_STMDX),

		/* fall through */
		DECODE_CUSTOM	(0, 0, STACK_USE_NONE),
		DECODE_END
	};

	return probes_decode_insn(insn, asi, table, false, false,
				  stack_check_actions, NULL);
}

/* All T32 memory-access actions share the same stack checker. */
const struct decode_checker t32_stack_checker[NUM_PROBES_T32_ACTIONS] = {
	[PROBES_T32_LDMSTM] = {.checker = t32_check_stack},
	[PROBES_T32_LDRDSTRD] = {.checker = t32_check_stack},
	[PROBES_T32_LDRSTR] = {.checker = t32_check_stack},
};

/*
 * See following comments. This insn must be 'push'.
 * Stack usage is 4 bytes per register in the list (bits 8:0 cover
 * R0-R7 plus LR in the T16 PUSH encoding).
 */
static enum probes_insn __kprobes t16_check_stack(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	unsigned int reglist = insn & 0x1ff;
	asi->stack_space = hweight32(reglist) * 4;
	return INSN_GOOD;
}

/*
 * T16 encoding is simple: only the 'push' insn can need extra stack space.
 * Other insns, like str, can only use r0-r7 as Rn.
 */
const struct decode_checker t16_stack_checker[NUM_PROBES_T16_ACTIONS] = {
	[PROBES_T16_PUSH] = {.checker = t16_check_stack},
};
linux-master
arch/arm/probes/kprobes/checkers-thumb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/actions-common.c
 *
 * Copyright (C) 2011 Jon Medhurst <[email protected]>.
 *
 * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
 * Copyright (C) 2006, 2007 Motorola Inc.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <asm/opcodes.h>

#include "core.h"

/*
 * Simulate a generic LDM/STM by decoding the L/W/U/P bits and the
 * register list from the opcode and transferring registers to/from the
 * memory pointed at by the base register Rn, including write-back.
 */
static void __kprobes simulate_ldm1stm1(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		struct pt_regs *regs)
{
	int rn = (insn >> 16) & 0xf;
	int lbit = insn & (1 << 20);	/* L: load vs store */
	int wbit = insn & (1 << 21);	/* W: write-back to Rn */
	int ubit = insn & (1 << 23);	/* U: increment vs decrement */
	int pbit = insn & (1 << 24);	/* P: pre- vs post-index */
	long *addr = (long *)regs->uregs[rn];
	int reg_bit_vector;
	int reg_count;

	/* Population count of the register list. */
	reg_count = 0;
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		reg_bit_vector &= (reg_bit_vector - 1);
		++reg_count;
	}

	/* Compute the lowest address touched from the U/P bits. */
	if (!ubit)
		addr -= reg_count;
	addr += (!pbit == !ubit);

	/* Transfer registers in ascending order of register number. */
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		int reg = __ffs(reg_bit_vector);
		reg_bit_vector &= (reg_bit_vector - 1);
		if (lbit)
			regs->uregs[reg] = *addr++;
		else
			*addr++ = regs->uregs[reg];
	}

	/* Write the final address back to Rn if requested. */
	if (wbit) {
		if (!ubit)
			addr -= reg_count;
		addr -= (!pbit == !ubit);
		regs->uregs[rn] = (long)addr;
	}
}

/*
 * STM with PC in the register list: temporarily set PC to the value it
 * would have when the real instruction stores it (str_pc_offset past
 * the instruction), then restore it afterwards.
 */
static void __kprobes simulate_stm1_pc(probes_opcode_t insn,
	struct arch_probes_insn *asi,
	struct pt_regs *regs)
{
	unsigned long addr = regs->ARM_pc - 4;

	regs->ARM_pc = (long)addr + str_pc_offset;
	simulate_ldm1stm1(insn, asi, regs);
	regs->ARM_pc = (long)addr + 4;
}

/* LDM with PC in the register list: perform the branch after loading. */
static void __kprobes simulate_ldm1_pc(probes_opcode_t insn,
	struct arch_probes_insn *asi,
	struct pt_regs *regs)
{
	simulate_ldm1stm1(insn, asi, regs);
	load_write_pc(regs->ARM_pc, regs);
}

/*
 * Execute the instruction slot (asi->insn_fn) with R0-R12 loaded from
 * the supplied register context and store R0-R12 back afterwards.
 * Flags are not preserved.  The frame-pointer register (r11 for ARM,
 * r7 for Thumb) is saved/restored around the call.
 */
static void __kprobes
emulate_generic_r0_12_noflags(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	register void *rregs asm("r1") = regs;
	register void *rfn asm("lr") = asi->insn_fn;

	__asm__ __volatile__ (
ARM(		"stmdb	sp!, {%[regs], r11}	\n\t"	)
THUMB(		"stmdb	sp!, {%[regs], r7}	\n\t"	)
		"ldmia	%[regs], {r0-r12}	\n\t"
#if __LINUX_ARM_ARCH__ >= 6
		"blx	%[fn]			\n\t"
#else
		"str	%[fn], [sp, #-4]!	\n\t"
		"adr	lr, 1f			\n\t"
		"ldr	pc, [sp], #4		\n\t"
		"1:				\n\t"
#endif
		"ldr	lr, [sp], #4		\n\t" /* lr = regs */
		"stmia	lr, {r0-r12}		\n\t"
ARM(		"ldr	r11, [sp], #4		\n\t"	)
THUMB(		"ldr	r7, [sp], #4		\n\t"	)
		: [regs] "=r" (rregs), [fn] "=r" (rfn)
		: "0" (rregs), "1" (rfn)
		: "r0", "r2", "r3", "r4", "r5", "r6", ARM("r7") THUMB("r11"),
		  "r8", "r9", "r10", "r12", "memory", "cc"
		);
}

/*
 * Variant for instructions rewritten to use R0..R12 in place of
 * R2..R14: offset the register context by two slots.
 */
static void __kprobes
emulate_generic_r2_14_noflags(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	emulate_generic_r0_12_noflags(insn, asi,
		(struct pt_regs *)(regs->uregs+2));
}

/*
 * Variant for LDM rewritten from R3..R15 down to R0..R12; the loaded
 * PC value lands in the context and is branched to afterwards.
 */
static void __kprobes
emulate_ldm_r3_15(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	emulate_generic_r0_12_noflags(insn, asi,
		(struct pt_regs *)(regs->uregs+3));
	load_write_pc(regs->ARM_pc, regs);
}

/*
 * Decode ARM LDM/STM for kprobes.  Where the register list fits a
 * shiftable range, the instruction is rewritten to use low registers
 * and executed in the slot (fast emulation); otherwise it falls back
 * to full software simulation.
 */
enum probes_insn __kprobes
kprobe_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	probes_insn_handler_t *handler = 0;
	unsigned reglist = insn & 0xffff;
	int is_ldm = insn & 0x100000;
	int rn = (insn >> 16) & 0xf;

	if (rn <= 12 && (reglist & 0xe000) == 0) {
		/* Instruction only uses registers in the range R0..R12 */
		handler = emulate_generic_r0_12_noflags;

	} else if (rn >= 2 && (reglist & 0x8003) == 0) {
		/* Instruction only uses registers in the range R2..R14 */
		rn -= 2;
		reglist >>= 2;
		handler = emulate_generic_r2_14_noflags;

	} else if (rn >= 3 && (reglist & 0x0007) == 0) {
		/* Instruction only uses registers in the range R3..R15 */
		if (is_ldm && (reglist & 0x8000)) {
			rn -= 3;
			reglist >>= 3;
			handler = emulate_ldm_r3_15;
		}
	}

	if (handler) {
		/* We can emulate the instruction in (possibly) modified form */
		asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
						   (rn << 16) | reglist);
		asi->insn_handler = handler;
		return INSN_GOOD;
	}

	/* Fallback to slower simulation... */
	if (reglist & 0x8000)
		handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc;
	else
		handler = simulate_ldm1stm1;
	asi->insn_handler = handler;
	return INSN_GOOD_NO_SLOT;
}
linux-master
arch/arm/probes/kprobes/actions-common.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/kernel/kprobes-test.c * * Copyright (C) 2011 Jon Medhurst <[email protected]>. */ /* * This file contains test code for ARM kprobes. * * The top level function run_all_tests() executes tests for all of the * supported instruction sets: ARM, 16-bit Thumb, and 32-bit Thumb. These tests * fall into two categories; run_api_tests() checks basic functionality of the * kprobes API, and run_test_cases() is a comprehensive test for kprobes * instruction decoding and simulation. * * run_test_cases() first checks the kprobes decoding table for self consistency * (using table_test()) then executes a series of test cases for each of the CPU * instruction forms. coverage_start() and coverage_end() are used to verify * that these test cases cover all of the possible combinations of instructions * described by the kprobes decoding tables. * * The individual test cases are in kprobes-test-arm.c and kprobes-test-thumb.c * which use the macros defined in kprobes-test.h. The rest of this * documentation will describe the operation of the framework used by these * test cases. */ /* * TESTING METHODOLOGY * ------------------- * * The methodology used to test an ARM instruction 'test_insn' is to use * inline assembler like: * * test_before: nop * test_case: test_insn * test_after: nop * * When the test case is run a kprobe is placed of each nop. The * post-handler of the test_before probe is used to modify the saved CPU * register context to that which we require for the test case. The * pre-handler of the of the test_after probe saves a copy of the CPU * register context. In this way we can execute test_insn with a specific * register context and see the results afterwards. * * To actually test the kprobes instruction emulation we perform the above * step a second time but with an additional kprobe on the test_case * instruction itself. 
If the emulation is accurate then the results seen * by the test_after probe will be identical to the first run which didn't * have a probe on test_case. * * Each test case is run several times with a variety of variations in the * flags value of stored in CPSR, and for Thumb code, different ITState. * * For instructions which can modify PC, a second test_after probe is used * like this: * * test_before: nop * test_case: test_insn * test_after: nop * b test_done * test_after2: nop * test_done: * * The test case is constructed such that test_insn branches to * test_after2, or, if testing a conditional instruction, it may just * continue to test_after. The probes inserted at both locations let us * determine which happened. A similar approach is used for testing * backwards branches... * * b test_before * b test_done @ helps to cope with off by 1 branches * test_after2: nop * b test_done * test_before: nop * test_case: test_insn * test_after: nop * test_done: * * The macros used to generate the assembler instructions describe above * are TEST_INSTRUCTION, TEST_BRANCH_F (branch forwards) and TEST_BRANCH_B * (branch backwards). In these, the local variables numbered 1, 50, 2 and * 99 represent: test_before, test_case, test_after2 and test_done. * * FRAMEWORK * --------- * * Each test case is wrapped between the pair of macros TESTCASE_START and * TESTCASE_END. As well as performing the inline assembler boilerplate, * these call out to the kprobes_test_case_start() and * kprobes_test_case_end() functions which drive the execution of the test * case. The specific arguments to use for each test case are stored as * inline data constructed using the various TEST_ARG_* macros. 
Putting * this all together, a simple test case may look like: * * TESTCASE_START("Testing mov r0, r7") * TEST_ARG_REG(7, 0x12345678) // Set r7=0x12345678 * TEST_ARG_END("") * TEST_INSTRUCTION("mov r0, r7") * TESTCASE_END * * Note, in practice the single convenience macro TEST_R would be used for this * instead. * * The above would expand to assembler looking something like: * * @ TESTCASE_START * bl __kprobes_test_case_start * .pushsection .rodata * "10: * .ascii "mov r0, r7" @ text title for test case * .byte 0 * .popsection * @ start of inline data... * .word 10b @ pointer to title in .rodata section * * @ TEST_ARG_REG * .byte ARG_TYPE_REG * .byte 7 * .short 0 * .word 0x1234567 * * @ TEST_ARG_END * .byte ARG_TYPE_END * .byte TEST_ISA @ flags, including ISA being tested * .short 50f-0f @ offset of 'test_before' * .short 2f-0f @ offset of 'test_after2' (if relevent) * .short 99f-0f @ offset of 'test_done' * @ start of test case code... * 0: * .code TEST_ISA @ switch to ISA being tested * * @ TEST_INSTRUCTION * 50: nop @ location for 'test_before' probe * 1: mov r0, r7 @ the test case instruction 'test_insn' * nop @ location for 'test_after' probe * * // TESTCASE_END * 2: * 99: bl __kprobes_test_case_end_##TEST_ISA * .code NONMAL_ISA * * When the above is execute the following happens... * * __kprobes_test_case_start() is an assembler wrapper which sets up space * for a stack buffer and calls the C function kprobes_test_case_start(). * This C function will do some initial processing of the inline data and * setup some global state. It then inserts the test_before and test_after * kprobes and returns a value which causes the assembler wrapper to jump * to the start of the test case code, (local label '0'). * * When the test case code executes, the test_before probe will be hit and * test_before_post_handler will call setup_test_context(). This fills the * stack buffer and CPU registers with a test pattern and then processes * the test case arguments. 
In our example there is one TEST_ARG_REG which * indicates that R7 should be loaded with the value 0x12345678. * * When the test_before probe ends, the test case continues and executes * the "mov r0, r7" instruction. It then hits the test_after probe and the * pre-handler for this (test_after_pre_handler) will save a copy of the * CPU register context. This should now have R0 holding the same value as * R7. * * Finally we get to the call to __kprobes_test_case_end_{32,16}. This is * an assembler wrapper which switches back to the ISA used by the test * code and calls the C function kprobes_test_case_end(). * * For each run through the test case, test_case_run_count is incremented * by one. For even runs, kprobes_test_case_end() saves a copy of the * register and stack buffer contents from the test case just run. It then * inserts a kprobe on the test case instruction 'test_insn' and returns a * value to cause the test case code to be re-run. * * For odd numbered runs, kprobes_test_case_end() compares the register and * stack buffer contents to those that were saved on the previous even * numbered run (the one without the kprobe on test_insn). These should be * the same if the kprobe instruction simulation routine is correct. * * The pair of test case runs is repeated with different combinations of * flag values in CPSR and, for Thumb, different ITState. This is * controlled by test_context_cpsr(). * * BUILDING TEST CASES * ------------------- * * * As an aid to building test cases, the stack buffer is initialised with * some special values: * * [SP+13*4] Contains SP+120. This can be used to test instructions * which load a value into SP. * * [SP+15*4] When testing branching instructions using TEST_BRANCH_{F,B}, * this holds the target address of the branch, 'test_after2'. * This can be used to test instructions which load a PC value * from memory. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/sched/clock.h> #include <linux/kprobes.h> #include <linux/errno.h> #include <linux/stddef.h> #include <linux/bug.h> #include <asm/opcodes.h> #include "core.h" #include "test-core.h" #include "../decode-arm.h" #include "../decode-thumb.h" #define BENCHMARKING 1 /* * Test basic API */ static bool test_regs_ok; static int test_func_instance; static int pre_handler_called; static int post_handler_called; static int kretprobe_handler_called; static int tests_failed; #define FUNC_ARG1 0x12345678 #define FUNC_ARG2 0xabcdef #ifndef CONFIG_THUMB2_KERNEL #define RET(reg) "mov pc, "#reg long arm_func(long r0, long r1); static void __used __naked __arm_kprobes_test_func(void) { __asm__ __volatile__ ( ".arm \n\t" ".type arm_func, %%function \n\t" "arm_func: \n\t" "adds r0, r0, r1 \n\t" "mov pc, lr \n\t" ".code "NORMAL_ISA /* Back to Thumb if necessary */ : : : "r0", "r1", "cc" ); } #else /* CONFIG_THUMB2_KERNEL */ #define RET(reg) "bx "#reg long thumb16_func(long r0, long r1); long thumb32even_func(long r0, long r1); long thumb32odd_func(long r0, long r1); static void __used __naked __thumb_kprobes_test_funcs(void) { __asm__ __volatile__ ( ".type thumb16_func, %%function \n\t" "thumb16_func: \n\t" "adds.n r0, r0, r1 \n\t" "bx lr \n\t" ".align \n\t" ".type thumb32even_func, %%function \n\t" "thumb32even_func: \n\t" "adds.w r0, r0, r1 \n\t" "bx lr \n\t" ".align \n\t" "nop.n \n\t" ".type thumb32odd_func, %%function \n\t" "thumb32odd_func: \n\t" "adds.w r0, r0, r1 \n\t" "bx lr \n\t" : : : "r0", "r1", "cc" ); } #endif /* CONFIG_THUMB2_KERNEL */ static int call_test_func(long (*func)(long, long), bool check_test_regs) { long ret; ++test_func_instance; test_regs_ok = false; ret = (*func)(FUNC_ARG1, FUNC_ARG2); if (ret != FUNC_ARG1 + FUNC_ARG2) { pr_err("FAIL: call_test_func: func returned %lx\n", ret); return false; } if (check_test_regs && !test_regs_ok) { pr_err("FAIL: test regs not 
OK\n"); return false; } return true; } static int __kprobes pre_handler(struct kprobe *p, struct pt_regs *regs) { pre_handler_called = test_func_instance; if (regs->ARM_r0 == FUNC_ARG1 && regs->ARM_r1 == FUNC_ARG2) test_regs_ok = true; return 0; } static void __kprobes post_handler(struct kprobe *p, struct pt_regs *regs, unsigned long flags) { post_handler_called = test_func_instance; if (regs->ARM_r0 != FUNC_ARG1 + FUNC_ARG2 || regs->ARM_r1 != FUNC_ARG2) test_regs_ok = false; } static struct kprobe the_kprobe = { .addr = 0, .pre_handler = pre_handler, .post_handler = post_handler }; static int test_kprobe(long (*func)(long, long)) { int ret; the_kprobe.addr = (kprobe_opcode_t *)func; ret = register_kprobe(&the_kprobe); if (ret < 0) { pr_err("FAIL: register_kprobe failed with %d\n", ret); return ret; } ret = call_test_func(func, true); unregister_kprobe(&the_kprobe); the_kprobe.flags = 0; /* Clear disable flag to allow reuse */ if (!ret) return -EINVAL; if (pre_handler_called != test_func_instance) { pr_err("FAIL: kprobe pre_handler not called\n"); return -EINVAL; } if (post_handler_called != test_func_instance) { pr_err("FAIL: kprobe post_handler not called\n"); return -EINVAL; } if (!call_test_func(func, false)) return -EINVAL; if (pre_handler_called == test_func_instance || post_handler_called == test_func_instance) { pr_err("FAIL: probe called after unregistering\n"); return -EINVAL; } return 0; } static int __kprobes kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { kretprobe_handler_called = test_func_instance; if (regs_return_value(regs) == FUNC_ARG1 + FUNC_ARG2) test_regs_ok = true; return 0; } static struct kretprobe the_kretprobe = { .handler = kretprobe_handler, }; static int test_kretprobe(long (*func)(long, long)) { int ret; the_kretprobe.kp.addr = (kprobe_opcode_t *)func; ret = register_kretprobe(&the_kretprobe); if (ret < 0) { pr_err("FAIL: register_kretprobe failed with %d\n", ret); return ret; } ret = call_test_func(func, 
true); unregister_kretprobe(&the_kretprobe); the_kretprobe.kp.flags = 0; /* Clear disable flag to allow reuse */ if (!ret) return -EINVAL; if (kretprobe_handler_called != test_func_instance) { pr_err("FAIL: kretprobe handler not called\n"); return -EINVAL; } if (!call_test_func(func, false)) return -EINVAL; if (kretprobe_handler_called == test_func_instance) { pr_err("FAIL: kretprobe called after unregistering\n"); return -EINVAL; } return 0; } static int run_api_tests(long (*func)(long, long)) { int ret; pr_info(" kprobe\n"); ret = test_kprobe(func); if (ret < 0) return ret; pr_info(" kretprobe\n"); ret = test_kretprobe(func); if (ret < 0) return ret; return 0; } /* * Benchmarking */ #if BENCHMARKING static void __naked benchmark_nop(void) { __asm__ __volatile__ ( "nop \n\t" RET(lr)" \n\t" ); } #ifdef CONFIG_THUMB2_KERNEL #define wide ".w" #else #define wide #endif static void __naked benchmark_pushpop1(void) { __asm__ __volatile__ ( "stmdb"wide" sp!, {r3-r11,lr} \n\t" "ldmia"wide" sp!, {r3-r11,pc}" ); } static void __naked benchmark_pushpop2(void) { __asm__ __volatile__ ( "stmdb"wide" sp!, {r0-r8,lr} \n\t" "ldmia"wide" sp!, {r0-r8,pc}" ); } static void __naked benchmark_pushpop3(void) { __asm__ __volatile__ ( "stmdb"wide" sp!, {r4,lr} \n\t" "ldmia"wide" sp!, {r4,pc}" ); } static void __naked benchmark_pushpop4(void) { __asm__ __volatile__ ( "stmdb"wide" sp!, {r0,lr} \n\t" "ldmia"wide" sp!, {r0,pc}" ); } #ifdef CONFIG_THUMB2_KERNEL static void __naked benchmark_pushpop_thumb(void) { __asm__ __volatile__ ( "push.n {r0-r7,lr} \n\t" "pop.n {r0-r7,pc}" ); } #endif static int __kprobes benchmark_pre_handler(struct kprobe *p, struct pt_regs *regs) { return 0; } static int benchmark(void(*fn)(void)) { unsigned n, i, t, t0; for (n = 1000; ; n *= 2) { t0 = sched_clock(); for (i = n; i > 0; --i) fn(); t = sched_clock() - t0; if (t >= 250000000) break; /* Stop once we took more than 0.25 seconds */ } return t / n; /* Time for one iteration in nanoseconds */ }; static int 
kprobe_benchmark(void(*fn)(void), unsigned offset) { struct kprobe k = { .addr = (kprobe_opcode_t *)((uintptr_t)fn + offset), .pre_handler = benchmark_pre_handler, }; int ret = register_kprobe(&k); if (ret < 0) { pr_err("FAIL: register_kprobe failed with %d\n", ret); return ret; } ret = benchmark(fn); unregister_kprobe(&k); return ret; }; struct benchmarks { void (*fn)(void); unsigned offset; const char *title; }; static int run_benchmarks(void) { int ret; struct benchmarks list[] = { {&benchmark_nop, 0, "nop"}, /* * benchmark_pushpop{1,3} will have the optimised * instruction emulation, whilst benchmark_pushpop{2,4} will * be the equivalent unoptimised instructions. */ {&benchmark_pushpop1, 0, "stmdb sp!, {r3-r11,lr}"}, {&benchmark_pushpop1, 4, "ldmia sp!, {r3-r11,pc}"}, {&benchmark_pushpop2, 0, "stmdb sp!, {r0-r8,lr}"}, {&benchmark_pushpop2, 4, "ldmia sp!, {r0-r8,pc}"}, {&benchmark_pushpop3, 0, "stmdb sp!, {r4,lr}"}, {&benchmark_pushpop3, 4, "ldmia sp!, {r4,pc}"}, {&benchmark_pushpop4, 0, "stmdb sp!, {r0,lr}"}, {&benchmark_pushpop4, 4, "ldmia sp!, {r0,pc}"}, #ifdef CONFIG_THUMB2_KERNEL {&benchmark_pushpop_thumb, 0, "push.n {r0-r7,lr}"}, {&benchmark_pushpop_thumb, 2, "pop.n {r0-r7,pc}"}, #endif {0} }; struct benchmarks *b; for (b = list; b->fn; ++b) { ret = kprobe_benchmark(b->fn, b->offset); if (ret < 0) return ret; pr_info(" %dns for kprobe %s\n", ret, b->title); } pr_info("\n"); return 0; } #endif /* BENCHMARKING */ /* * Decoding table self-consistency tests */ static const int decode_struct_sizes[NUM_DECODE_TYPES] = { [DECODE_TYPE_TABLE] = sizeof(struct decode_table), [DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom), [DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate), [DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate), [DECODE_TYPE_OR] = sizeof(struct decode_or), [DECODE_TYPE_REJECT] = sizeof(struct decode_reject) }; static int table_iter(const union decode_item *table, int (*fn)(const struct decode_header *, void *), void *args) { const struct 
decode_header *h = (struct decode_header *)table;
	int result;

	for (;;) {
		enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;

		if (type == DECODE_TYPE_END)
			return 0;

		result = fn(h, args);
		if (result)
			return result;

		/* Entries vary in size; advance by this entry's struct size */
		h = (struct decode_header *)
			((uintptr_t)h + decode_struct_sizes[type]);
	}
}

/* Report a decoding-table consistency failure for entry @h. */
static int table_test_fail(const struct decode_header *h, const char* message)
{
	pr_err("FAIL: kprobes test failure \"%s\" (mask %08x, value %08x)\n",
		message, h->mask.bits, h->value.bits);
	return -EINVAL;
}

struct table_test_args {
	const union decode_item *root_table; /* table being validated */
	u32 parent_mask;		/* accumulated mask of enclosing tables */
	u32 parent_value;		/* accumulated value of enclosing tables */
};

/*
 * Per-entry consistency checks: an entry's mask/value must be internally
 * consistent and must refine (not contradict) its parent table's match.
 * Sub-tables are validated recursively.
 */
static int table_test_fn(const struct decode_header *h, void *args)
{
	struct table_test_args *a = (struct table_test_args *)args;
	enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;

	if (h->value.bits & ~h->mask.bits)
		return table_test_fail(h, "Match value has bits not in mask");

	if ((h->mask.bits & a->parent_mask) != a->parent_mask)
		return table_test_fail(h, "Mask has bits not in parent mask");

	if ((h->value.bits ^ a->parent_value) & a->parent_mask)
		return table_test_fail(h, "Value is inconsistent with parent");

	if (type == DECODE_TYPE_TABLE) {
		struct decode_table *d = (struct decode_table *)h;
		struct table_test_args args2 = *a;
		args2.parent_mask = h->mask.bits;
		args2.parent_value = h->value.bits;
		return table_iter(d->table.table, table_test_fn, &args2);
	}

	return 0;
}

/* Validate a whole decode table tree for self-consistency. */
static int table_test(const union decode_item *table)
{
	struct table_test_args args = {
		.root_table = table,
		.parent_mask = 0,
		.parent_value = 0
	};
	return table_iter(args.root_table, table_test_fn, &args);
}


/*
 * Decoding table test coverage analysis
 *
 * coverage_start() builds a coverage_table which contains a list of
 * coverage_entry's to match each entry in the specified kprobes instruction
 * decoding table.
 *
 * When test cases are run, coverage_add() is called to process each case.
* This looks up the corresponding entry in the coverage_table and sets it as * being matched, as well as clearing the regs flag appropriate for the test. * * After all test cases have been run, coverage_end() is called to check that * all entries in coverage_table have been matched and that all regs flags are * cleared. I.e. that all possible combinations of instructions described by * the kprobes decoding tables have had a test case executed for them. */ bool coverage_fail; #define MAX_COVERAGE_ENTRIES 256 struct coverage_entry { const struct decode_header *header; unsigned regs; unsigned nesting; char matched; }; struct coverage_table { struct coverage_entry *base; unsigned num_entries; unsigned nesting; }; struct coverage_table coverage; #define COVERAGE_ANY_REG (1<<0) #define COVERAGE_SP (1<<1) #define COVERAGE_PC (1<<2) #define COVERAGE_PCWB (1<<3) static const char coverage_register_lookup[16] = { [REG_TYPE_ANY] = COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC, [REG_TYPE_SAMEAS16] = COVERAGE_ANY_REG, [REG_TYPE_SP] = COVERAGE_SP, [REG_TYPE_PC] = COVERAGE_PC, [REG_TYPE_NOSP] = COVERAGE_ANY_REG | COVERAGE_SP, [REG_TYPE_NOSPPC] = COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC, [REG_TYPE_NOPC] = COVERAGE_ANY_REG | COVERAGE_PC, [REG_TYPE_NOPCWB] = COVERAGE_ANY_REG | COVERAGE_PC | COVERAGE_PCWB, [REG_TYPE_NOPCX] = COVERAGE_ANY_REG, [REG_TYPE_NOSPPCX] = COVERAGE_ANY_REG | COVERAGE_SP, }; static unsigned coverage_start_registers(const struct decode_header *h) { unsigned regs = 0; int i; for (i = 0; i < 20; i += 4) { int r = (h->type_regs.bits >> (DECODE_TYPE_BITS + i)) & 0xf; regs |= coverage_register_lookup[r] << i; } return regs; } static int coverage_start_fn(const struct decode_header *h, void *args) { struct coverage_table *coverage = (struct coverage_table *)args; enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; struct coverage_entry *entry = coverage->base + coverage->num_entries; if (coverage->num_entries == MAX_COVERAGE_ENTRIES - 1) { pr_err("FAIL: 
Out of space for test coverage data"); return -ENOMEM; } ++coverage->num_entries; entry->header = h; entry->regs = coverage_start_registers(h); entry->nesting = coverage->nesting; entry->matched = false; if (type == DECODE_TYPE_TABLE) { struct decode_table *d = (struct decode_table *)h; int ret; ++coverage->nesting; ret = table_iter(d->table.table, coverage_start_fn, coverage); --coverage->nesting; return ret; } return 0; } static int coverage_start(const union decode_item *table) { coverage.base = kmalloc_array(MAX_COVERAGE_ENTRIES, sizeof(struct coverage_entry), GFP_KERNEL); coverage.num_entries = 0; coverage.nesting = 0; return table_iter(table, coverage_start_fn, &coverage); } static void coverage_add_registers(struct coverage_entry *entry, kprobe_opcode_t insn) { int regs = entry->header->type_regs.bits >> DECODE_TYPE_BITS; int i; for (i = 0; i < 20; i += 4) { enum decode_reg_type reg_type = (regs >> i) & 0xf; int reg = (insn >> i) & 0xf; int flag; if (!reg_type) continue; if (reg == 13) flag = COVERAGE_SP; else if (reg == 15) flag = COVERAGE_PC; else flag = COVERAGE_ANY_REG; entry->regs &= ~(flag << i); switch (reg_type) { case REG_TYPE_NONE: case REG_TYPE_ANY: case REG_TYPE_SAMEAS16: break; case REG_TYPE_SP: if (reg != 13) return; break; case REG_TYPE_PC: if (reg != 15) return; break; case REG_TYPE_NOSP: if (reg == 13) return; break; case REG_TYPE_NOSPPC: case REG_TYPE_NOSPPCX: if (reg == 13 || reg == 15) return; break; case REG_TYPE_NOPCWB: if (!is_writeback(insn)) break; if (reg == 15) { entry->regs &= ~(COVERAGE_PCWB << i); return; } break; case REG_TYPE_NOPC: case REG_TYPE_NOPCX: if (reg == 15) return; break; } } } static void coverage_add(kprobe_opcode_t insn) { struct coverage_entry *entry = coverage.base; struct coverage_entry *end = coverage.base + coverage.num_entries; bool matched = false; unsigned nesting = 0; for (; entry < end; ++entry) { const struct decode_header *h = entry->header; enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; 
if (entry->nesting > nesting) continue; /* Skip sub-table we didn't match */ if (entry->nesting < nesting) break; /* End of sub-table we were scanning */ if (!matched) { if ((insn & h->mask.bits) != h->value.bits) continue; entry->matched = true; } switch (type) { case DECODE_TYPE_TABLE: ++nesting; break; case DECODE_TYPE_CUSTOM: case DECODE_TYPE_SIMULATE: case DECODE_TYPE_EMULATE: coverage_add_registers(entry, insn); return; case DECODE_TYPE_OR: matched = true; break; case DECODE_TYPE_REJECT: default: return; } } } static void coverage_end(void) { struct coverage_entry *entry = coverage.base; struct coverage_entry *end = coverage.base + coverage.num_entries; for (; entry < end; ++entry) { u32 mask = entry->header->mask.bits; u32 value = entry->header->value.bits; if (entry->regs) { pr_err("FAIL: Register test coverage missing for %08x %08x (%05x)\n", mask, value, entry->regs); coverage_fail = true; } if (!entry->matched) { pr_err("FAIL: Test coverage entry missing for %08x %08x\n", mask, value); coverage_fail = true; } } kfree(coverage.base); } /* * Framework for instruction set test cases */ void __naked __kprobes_test_case_start(void) { __asm__ __volatile__ ( "mov r2, sp \n\t" "bic r3, r2, #7 \n\t" "mov sp, r3 \n\t" "stmdb sp!, {r2-r11} \n\t" "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" "bic r0, lr, #1 @ r0 = inline data \n\t" "mov r1, sp \n\t" "bl kprobes_test_case_start \n\t" RET(r0)" \n\t" ); } #ifndef CONFIG_THUMB2_KERNEL void __naked __kprobes_test_case_end_32(void) { __asm__ __volatile__ ( "mov r4, lr \n\t" "bl kprobes_test_case_end \n\t" "cmp r0, #0 \n\t" "movne pc, r0 \n\t" "mov r0, r4 \n\t" "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" "ldmia sp!, {r2-r11} \n\t" "mov sp, r2 \n\t" "mov pc, r0 \n\t" ); } #else /* CONFIG_THUMB2_KERNEL */ void __naked __kprobes_test_case_end_16(void) { __asm__ __volatile__ ( "mov r4, lr \n\t" "bl kprobes_test_case_end \n\t" "cmp r0, #0 \n\t" "bxne r0 \n\t" "mov r0, r4 \n\t" "add sp, sp, 
#"__stringify(TEST_MEMORY_SIZE)"\n\t" "ldmia sp!, {r2-r11} \n\t" "mov sp, r2 \n\t" "bx r0 \n\t" ); } void __naked __kprobes_test_case_end_32(void) { __asm__ __volatile__ ( ".arm \n\t" "orr lr, lr, #1 @ will return to Thumb code \n\t" "ldr pc, 1f \n\t" "1: \n\t" ".word __kprobes_test_case_end_16 \n\t" ); } #endif int kprobe_test_flags; int kprobe_test_cc_position; static int test_try_count; static int test_pass_count; static int test_fail_count; static struct pt_regs initial_regs; static struct pt_regs expected_regs; static struct pt_regs result_regs; static u32 expected_memory[TEST_MEMORY_SIZE/sizeof(u32)]; static const char *current_title; static struct test_arg *current_args; static u32 *current_stack; static uintptr_t current_branch_target; static uintptr_t current_code_start; static kprobe_opcode_t current_instruction; #define TEST_CASE_PASSED -1 #define TEST_CASE_FAILED -2 static int test_case_run_count; static bool test_case_is_thumb; static int test_instance; static unsigned long test_check_cc(int cc, unsigned long cpsr) { int ret = arm_check_condition(cc << 28, cpsr); return (ret != ARM_OPCODE_CONDTEST_FAIL); } static int is_last_scenario; static int probe_should_run; /* 0 = no, 1 = yes, -1 = unknown */ static int memory_needs_checking; static unsigned long test_context_cpsr(int scenario) { unsigned long cpsr; probe_should_run = 1; /* Default case is that we cycle through 16 combinations of flags */ cpsr = (scenario & 0xf) << 28; /* N,Z,C,V flags */ cpsr |= (scenario & 0xf) << 16; /* GE flags */ cpsr |= (scenario & 0x1) << 27; /* Toggle Q flag */ if (!test_case_is_thumb) { /* Testing ARM code */ int cc = current_instruction >> 28; probe_should_run = test_check_cc(cc, cpsr) != 0; if (scenario == 15) is_last_scenario = true; } else if (kprobe_test_flags & TEST_FLAG_NO_ITBLOCK) { /* Testing Thumb code without setting ITSTATE */ if (kprobe_test_cc_position) { int cc = (current_instruction >> kprobe_test_cc_position) & 0xf; probe_should_run = test_check_cc(cc, 
cpsr) != 0; } if (scenario == 15) is_last_scenario = true; } else if (kprobe_test_flags & TEST_FLAG_FULL_ITBLOCK) { /* Testing Thumb code with all combinations of ITSTATE */ unsigned x = (scenario >> 4); unsigned cond_base = x % 7; /* ITSTATE<7:5> */ unsigned mask = x / 7 + 2; /* ITSTATE<4:0>, bits reversed */ if (mask > 0x1f) { /* Finish by testing state from instruction 'itt al' */ cond_base = 7; mask = 0x4; if ((scenario & 0xf) == 0xf) is_last_scenario = true; } cpsr |= cond_base << 13; /* ITSTATE<7:5> */ cpsr |= (mask & 0x1) << 12; /* ITSTATE<4> */ cpsr |= (mask & 0x2) << 10; /* ITSTATE<3> */ cpsr |= (mask & 0x4) << 8; /* ITSTATE<2> */ cpsr |= (mask & 0x8) << 23; /* ITSTATE<1> */ cpsr |= (mask & 0x10) << 21; /* ITSTATE<0> */ probe_should_run = test_check_cc((cpsr >> 12) & 0xf, cpsr) != 0; } else { /* Testing Thumb code with several combinations of ITSTATE */ switch (scenario) { case 16: /* Clear NZCV flags and 'it eq' state (false as Z=0) */ cpsr = 0x00000800; probe_should_run = 0; break; case 17: /* Set NZCV flags and 'it vc' state (false as V=1) */ cpsr = 0xf0007800; probe_should_run = 0; break; case 18: /* Clear NZCV flags and 'it ls' state (true as C=0) */ cpsr = 0x00009800; break; case 19: /* Set NZCV flags and 'it cs' state (true as C=1) */ cpsr = 0xf0002800; is_last_scenario = true; break; } } return cpsr; } static void setup_test_context(struct pt_regs *regs) { int scenario = test_case_run_count>>1; unsigned long val; struct test_arg *args; int i; is_last_scenario = false; memory_needs_checking = false; /* Initialise test memory on stack */ val = (scenario & 1) ? 
VALM : ~VALM; for (i = 0; i < TEST_MEMORY_SIZE / sizeof(current_stack[0]); ++i) current_stack[i] = val + (i << 8); /* Put target of branch on stack for tests which load PC from memory */ if (current_branch_target) current_stack[15] = current_branch_target; /* Put a value for SP on stack for tests which load SP from memory */ current_stack[13] = (u32)current_stack + 120; /* Initialise register values to their default state */ val = (scenario & 2) ? VALR : ~VALR; for (i = 0; i < 13; ++i) regs->uregs[i] = val ^ (i << 8); regs->ARM_lr = val ^ (14 << 8); regs->ARM_cpsr &= ~(APSR_MASK | PSR_IT_MASK); regs->ARM_cpsr |= test_context_cpsr(scenario); /* Perform testcase specific register setup */ args = current_args; for (; args[0].type != ARG_TYPE_END; ++args) switch (args[0].type) { case ARG_TYPE_REG: { struct test_arg_regptr *arg = (struct test_arg_regptr *)args; regs->uregs[arg->reg] = arg->val; break; } case ARG_TYPE_PTR: { struct test_arg_regptr *arg = (struct test_arg_regptr *)args; regs->uregs[arg->reg] = (unsigned long)current_stack + arg->val; memory_needs_checking = true; /* * Test memory at an address below SP is in danger of * being altered by an interrupt occurring and pushing * data onto the stack. Disable interrupts to stop this. 
*/ if (arg->reg == 13) regs->ARM_cpsr |= PSR_I_BIT; break; } case ARG_TYPE_MEM: { struct test_arg_mem *arg = (struct test_arg_mem *)args; current_stack[arg->index] = arg->val; break; } default: break; } } struct test_probe { struct kprobe kprobe; bool registered; int hit; }; static void unregister_test_probe(struct test_probe *probe) { if (probe->registered) { unregister_kprobe(&probe->kprobe); probe->kprobe.flags = 0; /* Clear disable flag to allow reuse */ } probe->registered = false; } static int register_test_probe(struct test_probe *probe) { int ret; if (probe->registered) BUG(); ret = register_kprobe(&probe->kprobe); if (ret >= 0) { probe->registered = true; probe->hit = -1; } return ret; } static int __kprobes test_before_pre_handler(struct kprobe *p, struct pt_regs *regs) { container_of(p, struct test_probe, kprobe)->hit = test_instance; return 0; } static void __kprobes test_before_post_handler(struct kprobe *p, struct pt_regs *regs, unsigned long flags) { setup_test_context(regs); initial_regs = *regs; initial_regs.ARM_cpsr &= ~PSR_IGNORE_BITS; } static int __kprobes test_case_pre_handler(struct kprobe *p, struct pt_regs *regs) { container_of(p, struct test_probe, kprobe)->hit = test_instance; return 0; } static int __kprobes test_after_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct test_arg *args; if (container_of(p, struct test_probe, kprobe)->hit == test_instance) return 0; /* Already run for this test instance */ result_regs = *regs; /* Mask out results which are indeterminate */ result_regs.ARM_cpsr &= ~PSR_IGNORE_BITS; for (args = current_args; args[0].type != ARG_TYPE_END; ++args) if (args[0].type == ARG_TYPE_REG_MASKED) { struct test_arg_regptr *arg = (struct test_arg_regptr *)args; result_regs.uregs[arg->reg] &= arg->val; } /* Undo any changes done to SP by the test case */ regs->ARM_sp = (unsigned long)current_stack; /* Enable interrupts in case setup_test_context disabled them */ regs->ARM_cpsr &= ~PSR_I_BIT; container_of(p, 
struct test_probe, kprobe)->hit = test_instance; return 0; } static struct test_probe test_before_probe = { .kprobe.pre_handler = test_before_pre_handler, .kprobe.post_handler = test_before_post_handler, }; static struct test_probe test_case_probe = { .kprobe.pre_handler = test_case_pre_handler, }; static struct test_probe test_after_probe = { .kprobe.pre_handler = test_after_pre_handler, }; static struct test_probe test_after2_probe = { .kprobe.pre_handler = test_after_pre_handler, }; static void test_case_cleanup(void) { unregister_test_probe(&test_before_probe); unregister_test_probe(&test_case_probe); unregister_test_probe(&test_after_probe); unregister_test_probe(&test_after2_probe); } static void print_registers(struct pt_regs *regs) { pr_err("r0 %08lx | r1 %08lx | r2 %08lx | r3 %08lx\n", regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); pr_err("r4 %08lx | r5 %08lx | r6 %08lx | r7 %08lx\n", regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7); pr_err("r8 %08lx | r9 %08lx | r10 %08lx | r11 %08lx\n", regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp); pr_err("r12 %08lx | sp %08lx | lr %08lx | pc %08lx\n", regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc); pr_err("cpsr %08lx\n", regs->ARM_cpsr); } static void print_memory(u32 *mem, size_t size) { int i; for (i = 0; i < size / sizeof(u32); i += 4) pr_err("%08x %08x %08x %08x\n", mem[i], mem[i+1], mem[i+2], mem[i+3]); } static size_t expected_memory_size(u32 *sp) { size_t size = sizeof(expected_memory); int offset = (uintptr_t)sp - (uintptr_t)current_stack; if (offset > 0) size -= offset; return size; } static void test_case_failed(const char *message) { test_case_cleanup(); pr_err("FAIL: %s\n", message); pr_err("FAIL: Test %s\n", current_title); pr_err("FAIL: Scenario %d\n", test_case_run_count >> 1); } static unsigned long next_instruction(unsigned long pc) { #ifdef CONFIG_THUMB2_KERNEL if ((pc & 1) && !is_wide_instruction(__mem_to_opcode_thumb16(*(u16 *)(pc - 1)))) return pc + 2; else 
#endif return pc + 4; } static uintptr_t __used kprobes_test_case_start(const char **title, void *stack) { struct test_arg *args; struct test_arg_end *end_arg; unsigned long test_code; current_title = *title++; args = (struct test_arg *)title; current_args = args; current_stack = stack; ++test_try_count; while (args->type != ARG_TYPE_END) ++args; end_arg = (struct test_arg_end *)args; test_code = (unsigned long)(args + 1); /* Code starts after args */ test_case_is_thumb = end_arg->flags & ARG_FLAG_THUMB; if (test_case_is_thumb) test_code |= 1; current_code_start = test_code; current_branch_target = 0; if (end_arg->branch_offset != end_arg->end_offset) current_branch_target = test_code + end_arg->branch_offset; test_code += end_arg->code_offset; test_before_probe.kprobe.addr = (kprobe_opcode_t *)test_code; test_code = next_instruction(test_code); test_case_probe.kprobe.addr = (kprobe_opcode_t *)test_code; if (test_case_is_thumb) { u16 *p = (u16 *)(test_code & ~1); current_instruction = __mem_to_opcode_thumb16(p[0]); if (is_wide_instruction(current_instruction)) { u16 instr2 = __mem_to_opcode_thumb16(p[1]); current_instruction = __opcode_thumb32_compose(current_instruction, instr2); } } else { current_instruction = __mem_to_opcode_arm(*(u32 *)test_code); } if (current_title[0] == '.') verbose("%s\n", current_title); else verbose("%s\t@ %0*x\n", current_title, test_case_is_thumb ? 
4 : 8, current_instruction); test_code = next_instruction(test_code); test_after_probe.kprobe.addr = (kprobe_opcode_t *)test_code; if (kprobe_test_flags & TEST_FLAG_NARROW_INSTR) { if (!test_case_is_thumb || is_wide_instruction(current_instruction)) { test_case_failed("expected 16-bit instruction"); goto fail; } } else { if (test_case_is_thumb && !is_wide_instruction(current_instruction)) { test_case_failed("expected 32-bit instruction"); goto fail; } } coverage_add(current_instruction); if (end_arg->flags & ARG_FLAG_UNSUPPORTED) { if (register_test_probe(&test_case_probe) < 0) goto pass; test_case_failed("registered probe for unsupported instruction"); goto fail; } if (end_arg->flags & ARG_FLAG_SUPPORTED) { if (register_test_probe(&test_case_probe) >= 0) goto pass; test_case_failed("couldn't register probe for supported instruction"); goto fail; } if (register_test_probe(&test_before_probe) < 0) { test_case_failed("register test_before_probe failed"); goto fail; } if (register_test_probe(&test_after_probe) < 0) { test_case_failed("register test_after_probe failed"); goto fail; } if (current_branch_target) { test_after2_probe.kprobe.addr = (kprobe_opcode_t *)current_branch_target; if (register_test_probe(&test_after2_probe) < 0) { test_case_failed("register test_after2_probe failed"); goto fail; } } /* Start first run of test case */ test_case_run_count = 0; ++test_instance; return current_code_start; pass: test_case_run_count = TEST_CASE_PASSED; return (uintptr_t)test_after_probe.kprobe.addr; fail: test_case_run_count = TEST_CASE_FAILED; return (uintptr_t)test_after_probe.kprobe.addr; } static bool check_test_results(void) { size_t mem_size = 0; u32 *mem = 0; if (memcmp(&expected_regs, &result_regs, sizeof(expected_regs))) { test_case_failed("registers differ"); goto fail; } if (memory_needs_checking) { mem = (u32 *)result_regs.ARM_sp; mem_size = expected_memory_size(mem); if (memcmp(expected_memory, mem, mem_size)) { test_case_failed("test memory differs"); goto 
fail; } } return true; fail: pr_err("initial_regs:\n"); print_registers(&initial_regs); pr_err("expected_regs:\n"); print_registers(&expected_regs); pr_err("result_regs:\n"); print_registers(&result_regs); if (mem) { pr_err("expected_memory:\n"); print_memory(expected_memory, mem_size); pr_err("result_memory:\n"); print_memory(mem, mem_size); } return false; } static uintptr_t __used kprobes_test_case_end(void) { if (test_case_run_count < 0) { if (test_case_run_count == TEST_CASE_PASSED) /* kprobes_test_case_start did all the needed testing */ goto pass; else /* kprobes_test_case_start failed */ goto fail; } if (test_before_probe.hit != test_instance) { test_case_failed("test_before_handler not run"); goto fail; } if (test_after_probe.hit != test_instance && test_after2_probe.hit != test_instance) { test_case_failed("test_after_handler not run"); goto fail; } /* * Even numbered test runs ran without a probe on the test case so * we can gather reference results. The subsequent odd numbered run * will have the probe inserted. 
*/ if ((test_case_run_count & 1) == 0) { /* Save results from run without probe */ u32 *mem = (u32 *)result_regs.ARM_sp; expected_regs = result_regs; memcpy(expected_memory, mem, expected_memory_size(mem)); /* Insert probe onto test case instruction */ if (register_test_probe(&test_case_probe) < 0) { test_case_failed("register test_case_probe failed"); goto fail; } } else { /* Check probe ran as expected */ if (probe_should_run == 1) { if (test_case_probe.hit != test_instance) { test_case_failed("test_case_handler not run"); goto fail; } } else if (probe_should_run == 0) { if (test_case_probe.hit == test_instance) { test_case_failed("test_case_handler ran"); goto fail; } } /* Remove probe for any subsequent reference run */ unregister_test_probe(&test_case_probe); if (!check_test_results()) goto fail; if (is_last_scenario) goto pass; } /* Do next test run */ ++test_case_run_count; ++test_instance; return current_code_start; fail: ++test_fail_count; goto end; pass: ++test_pass_count; end: test_case_cleanup(); return 0; } /* * Top level test functions */ static int run_test_cases(void (*tests)(void), const union decode_item *table) { int ret; pr_info(" Check decoding tables\n"); ret = table_test(table); if (ret) return ret; pr_info(" Run test cases\n"); ret = coverage_start(table); if (ret) return ret; tests(); coverage_end(); return 0; } static int __init run_all_tests(void) { int ret = 0; pr_info("Beginning kprobe tests...\n"); #ifndef CONFIG_THUMB2_KERNEL pr_info("Probe ARM code\n"); ret = run_api_tests(arm_func); if (ret) goto out; pr_info("ARM instruction simulation\n"); ret = run_test_cases(kprobe_arm_test_cases, probes_decode_arm_table); if (ret) goto out; #else /* CONFIG_THUMB2_KERNEL */ pr_info("Probe 16-bit Thumb code\n"); ret = run_api_tests(thumb16_func); if (ret) goto out; pr_info("Probe 32-bit Thumb code, even halfword\n"); ret = run_api_tests(thumb32even_func); if (ret) goto out; pr_info("Probe 32-bit Thumb code, odd halfword\n"); ret = 
run_api_tests(thumb32odd_func);
	if (ret)
		goto out;

	pr_info("16-bit Thumb instruction simulation\n");
	ret = run_test_cases(kprobe_thumb16_test_cases,
				probes_decode_thumb16_table);
	if (ret)
		goto out;

	pr_info("32-bit Thumb instruction simulation\n");
	ret = run_test_cases(kprobe_thumb32_test_cases,
				probes_decode_thumb32_table);
	if (ret)
		goto out;
#endif

	pr_info("Total instruction simulation tests=%d, pass=%d fail=%d\n",
		test_try_count, test_pass_count, test_fail_count);
	if (test_fail_count) {
		ret = -EINVAL;
		goto out;
	}

#if BENCHMARKING
	pr_info("Benchmarks\n");
	ret = run_benchmarks();
	if (ret)
		goto out;
#endif

#if __LINUX_ARM_ARCH__ >= 7
	/* We are able to run all test cases so coverage should be complete */
	if (coverage_fail) {
		pr_err("FAIL: Test coverage checks failed\n");
		ret = -EINVAL;
		goto out;
	}
#endif

out:
	/* A zero ret so far may still hide failures recorded in tests_failed */
	if (ret == 0)
		ret = tests_failed;
	if (ret == 0)
		pr_info("Finished kprobe tests OK\n");
	else
		pr_err("kprobe tests failed\n");

	return ret;
}


/*
 * Module setup
 */

#ifdef MODULE

static void __exit kprobe_test_exit(void)
{
}

module_init(run_all_tests)
module_exit(kprobe_test_exit)
MODULE_LICENSE("GPL");

#else /* !MODULE */

/* Built-in: run the whole suite late in boot, after kprobes is up. */
late_initcall(run_all_tests);

#endif
linux-master
arch/arm/probes/kprobes/test-core.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/probes/kprobes/actions-thumb.c * * Copyright (C) 2011 Jon Medhurst <[email protected]>. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/kprobes.h> #include "../decode-thumb.h" #include "core.h" #include "checkers.h" /* These emulation encodings are functionally equivalent... */ #define t32_emulate_rd8rn16rm0ra12_noflags \ t32_emulate_rdlo12rdhi8rn16rm0_noflags /* t32 thumb actions */ static void __kprobes t32_simulate_table_branch(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { unsigned long pc = regs->ARM_pc; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn]; unsigned long rmv = regs->uregs[rm]; unsigned int halfwords; if (insn & 0x10) /* TBH */ halfwords = ((u16 *)rnv)[rmv]; else /* TBB */ halfwords = ((u8 *)rnv)[rmv]; regs->ARM_pc = pc + 2 * halfwords; } static void __kprobes t32_simulate_mrs(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { int rd = (insn >> 8) & 0xf; unsigned long mask = 0xf8ff03df; /* Mask out execution state */ regs->uregs[rd] = regs->ARM_cpsr & mask; } static void __kprobes t32_simulate_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi, struct pt_regs *regs) { unsigned long pc = regs->ARM_pc; long offset = insn & 0x7ff; /* imm11 */ offset += (insn & 0x003f0000) >> 5; /* imm6 */ offset += (insn & 0x00002000) << 4; /* J1 */ offset += (insn & 0x00000800) << 7; /* J2 */ offset -= (insn & 0x04000000) >> 7; /* Apply sign bit */ regs->ARM_pc = pc + (offset * 2); } static enum probes_insn __kprobes t32_decode_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi, const struct decode_header *d) { int cc = (insn >> 22) & 0xf; asi->insn_check_cc = probes_condition_checks[cc]; asi->insn_handler = t32_simulate_cond_branch; return INSN_GOOD_NO_SLOT; } static void __kprobes t32_simulate_branch(probes_opcode_t insn, struct 
			       arch_probes_insn *asi, struct pt_regs *regs)
{
	/*
	 * (Continuation of the Thumb-2 B/BL/BLX simulation whose signature
	 * begins above this chunk.)  Reassemble the branch offset from the
	 * scattered immediate fields of the 32-bit encoding.
	 */
	unsigned long pc = regs->ARM_pc;
	long offset = insn & 0x7ff;		/* imm11 */
	offset += (insn & 0x03ff0000) >> 5;	/* imm10 */
	offset += (insn & 0x00002000) << 9;	/* J1 */
	offset += (insn & 0x00000800) << 10;	/* J2 */
	if (insn & 0x04000000)
		offset -= 0x00800000; /* Apply sign bit */
	else
		offset ^= 0x00600000; /* Invert J1 and J2 */

	if (insn & (1 << 14)) {
		/* BL or BLX */
		regs->ARM_lr = regs->ARM_pc | 1;	/* bit 0 set: return in Thumb state */
		if (!(insn & (1 << 12))) {
			/* BLX so switch to ARM mode */
			regs->ARM_cpsr &= ~PSR_T_BIT;
			pc &= ~3;
		}
	}

	regs->ARM_pc = pc + (offset * 2);
}

/*
 * Simulate Thumb-2 PC-relative loads (LDR/LDRH/LDRB literal, signed and
 * unsigned variants), decoded from the U, size and sign bits of insn.
 */
static void __kprobes
t32_simulate_ldr_literal(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long addr = regs->ARM_pc & ~3;	/* Align(PC, 4) base */
	int rt = (insn >> 12) & 0xf;
	unsigned long rtv;

	long offset = insn & 0xfff;
	if (insn & 0x00800000)
		addr += offset;
	else
		addr -= offset;

	if (insn & 0x00400000) {
		/* LDR */
		rtv = *(unsigned long *)addr;
		if (rt == 15) {
			bx_write_pc(rtv, regs);
			return;
		}
	} else if (insn & 0x00200000) {
		/* LDRH */
		if (insn & 0x01000000)
			rtv = *(s16 *)addr;
		else
			rtv = *(u16 *)addr;
	} else {
		/* LDRB */
		if (insn & 0x01000000)
			rtv = *(s8 *)addr;
		else
			rtv = *(u8 *)addr;
	}

	regs->uregs[rt] = rtv;
}

/*
 * Decode a Thumb-2 LDM/STM by reusing the ARM decoder, then swap the two
 * halfwords of the rewritten slot instruction back into Thumb memory order.
 */
static enum probes_insn __kprobes
t32_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
		const struct decode_header *d)
{
	enum probes_insn ret = kprobe_decode_ldmstm(insn, asi, d);

	/* Fixup modified instruction to have halfwords in correct order...*/
	insn = __mem_to_opcode_arm(asi->insn[0]);
	((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
	((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);

	return ret;
}

/*
 * Emulate Thumb-2 LDRD/STRD by calling the modified slot copy with the
 * operands pre-loaded into the fixed registers the rewritten insn uses
 * (Rt1=r0, Rt2=r1, Rn=r2).
 */
static void __kprobes
t32_emulate_ldrdstrd(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc & ~3;
	int rt1 = (insn >> 12) & 0xf;
	int rt2 = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;

	register unsigned long rt1v asm("r0") = regs->uregs[rt1];
	register unsigned long rt2v asm("r1") = regs->uregs[rt2];
	register unsigned long rnv asm("r2") = (rn == 15) ? pc
							  : regs->uregs[rn];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rt1v), "=r" (rt2v), "=r" (rnv)
		: "0" (rt1v), "1" (rt2v), "2" (rnv),
		  [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	if (rn != 15)
		regs->uregs[rn] = rnv; /* Writeback base register */
	regs->uregs[rt1] = rt1v;
	regs->uregs[rt2] = rt2v;
}

/*
 * Emulate Thumb-2 register-offset load/store (Rt=r0, Rn=r2, Rm=r3 in the
 * rewritten slot instruction).  Handles an LDR into PC via bx_write_pc().
 */
static void __kprobes
t32_emulate_ldrstr(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rt = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rtv asm("r0") = regs->uregs[rt];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rtv), "=r" (rnv)
		: "0" (rtv), "1" (rnv), "r" (rmv),
		  [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rn] = rnv; /* Writeback base register */
	if (rt == 15) /* Can't be true for a STR as they aren't allowed */
		bx_write_pc(rtv, regs);
	else
		regs->uregs[rt] = rtv;
}

/*
 * Emulate a Thumb-2 data-processing instruction of the form
 * "OP Rd, Rn, Rm" which both reads and writes the APSR flags.
 */
static void __kprobes
t32_emulate_rd8rn16rm0_rwflags(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];
	unsigned long cpsr = regs->ARM_cpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"blx    %[fn]			\n\t"
		"mrs	%[cpsr], cpsr		\n\t"
		: "=r" (rdv), [cpsr] "=r" (cpsr)
		: "0" (rdv), "r" (rnv), "r" (rmv),
		  "1" (cpsr), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
	/* Only fold the condition flags back; other CPSR bits are preserved */
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}

/* Emulate ADR/ADDW/SUBW with Rn==PC; PC value is word-aligned, flags untouched. */
static void __kprobes
t32_emulate_rd8pc16_noflags(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc;
	int rd = (insn >> 8) & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = pc & ~3;

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rdv)
		: "0" (rdv), "r" (rnv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
}

/* Emulate "OP Rd, Rn(, ...)" forms which do not touch the flags. */
static void __kprobes
t32_emulate_rd8rn16_noflags(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = regs->uregs[rn];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rdv)
		: "0" (rdv), "r" (rnv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
}

/* Emulate long multiply/accumulate "OP RdLo, RdHi, Rn, Rm", flags untouched. */
static void __kprobes
t32_emulate_rdlo12rdhi8rn16rm0_noflags(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		struct pt_regs *regs)
{
	int rdlo = (insn >> 12) & 0xf;
	int rdhi = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
	register unsigned long rdhiv asm("r1") = regs->uregs[rdhi];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rdlov), "=r" (rdhiv)
		: "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
		  [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rdlo] = rdlov;
	regs->uregs[rdhi] = rdhiv;
}

/* t16 thumb actions */

/* Simulate 16-bit BX/BLX Rm, including the Rm==PC special case. */
static void __kprobes
t16_simulate_bxblx(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc + 2;
	int rm = (insn >> 3) & 0xf;
	unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm];

	if (insn & (1 << 7)) /* BLX ? */
		regs->ARM_lr = regs->ARM_pc | 1;

	bx_write_pc(rmv, regs);
}

/* Simulate 16-bit "LDR Rt, [PC, #imm8*4]". */
static void __kprobes
t16_simulate_ldr_literal(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long *base = (unsigned long *)((regs->ARM_pc + 2) & ~3);
	long index = insn & 0xff;
	int rt = (insn >> 8) & 0x7;
	regs->uregs[rt] = base[index];
}

/* Simulate 16-bit SP-relative "LDR/STR Rt, [SP, #imm8*4]". */
static void __kprobes
t16_simulate_ldrstr_sp_relative(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long* base = (unsigned long *)regs->ARM_sp;
	long index = insn & 0xff;
	int rt = (insn >> 8) & 0x7;
	if (insn & 0x800) /* LDR */
		regs->uregs[rt] = base[index];
	else /* STR */
		base[index] = regs->uregs[rt];
}

/* Simulate 16-bit ADR/ADD Rd, SP|PC, #imm8*4. */
static void __kprobes
t16_simulate_reladr(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long base = (insn & 0x800) ? regs->ARM_sp
					    : ((regs->ARM_pc + 2) & ~3);
	long offset = insn & 0xff;
	int rt = (insn >> 8) & 0x7;
	regs->uregs[rt] = base + offset * 4;
}

/* Simulate 16-bit "ADD/SUB SP, #imm7*4". */
static void __kprobes
t16_simulate_add_sp_imm(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	long imm = insn & 0x7f;
	if (insn & 0x80) /* SUB */
		regs->ARM_sp -= imm * 4;
	else /* ADD */
		regs->ARM_sp += imm * 4;
}

/*
 * Simulate CBZ/CBNZ.  "nonzero" has bit 11 set when the branch should be
 * taken (op bit for CBNZ, inverted for CBZ on a zero register).
 */
static void __kprobes
t16_simulate_cbz(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rn = insn & 0x7;
	probes_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn;
	if (nonzero & 0x800) {
		long i = insn & 0x200;		/* i bit (imm bit 5) */
		long imm5 = insn & 0xf8;	/* imm5 field */
		unsigned long pc = regs->ARM_pc + 2;
		regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2);
	}
}

static void __kprobes
t16_simulate_it(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	/*
	 * The 8 IT state bits are split into two parts in CPSR:
	 *	ITSTATE<1:0> are in CPSR<26:25>
	 *	ITSTATE<7:2> are in CPSR<15:10>
	 * The new IT state is in the lower byte of insn.
	 */
	unsigned long cpsr = regs->ARM_cpsr;
	cpsr &= ~PSR_IT_MASK;
	cpsr |= (insn & 0xfc) << 8;
	cpsr |= (insn & 0x03) << 25;
	regs->ARM_cpsr = cpsr;
}

/* Step over an IT instruction: advance PC, then install the new IT state. */
static void __kprobes
t16_singlestep_it(probes_opcode_t insn,
		  struct arch_probes_insn *asi, struct pt_regs *regs)
{
	regs->ARM_pc += 2;
	t16_simulate_it(insn, asi, regs);
}

/* IT is always simulated (no slot); install the custom single-step handler. */
static enum probes_insn __kprobes
t16_decode_it(probes_opcode_t insn, struct arch_probes_insn *asi,
		const struct decode_header *d)
{
	asi->insn_singlestep = t16_singlestep_it;
	return INSN_GOOD_NO_SLOT;
}

/* Simulate a taken 16-bit conditional branch (condition checked by caller). */
static void __kprobes
t16_simulate_cond_branch(probes_opcode_t insn,
		struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc + 2;
	long offset = insn & 0x7f;
	offset -= insn & 0x80; /* Apply sign bit */
	regs->ARM_pc = pc + (offset * 2);
}

/* Hook the instruction's condition field into the probes condition checker. */
static enum probes_insn __kprobes
t16_decode_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi,
		const struct decode_header *d)
{
	int cc = (insn >> 8) & 0xf;
	asi->insn_check_cc = probes_condition_checks[cc];
	asi->insn_handler = t16_simulate_cond_branch;
	return INSN_GOOD_NO_SLOT;
}

/* Simulate the 16-bit unconditional branch "B <imm11>". */
static void __kprobes
t16_simulate_branch(probes_opcode_t insn,
		   struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc + 2;
	long offset = insn & 0x3ff;
	offset -= insn & 0x400; /* Apply sign bit */
	regs->ARM_pc = pc + (offset * 2);
}

/*
 * Run the slot copy of a 16-bit instruction that only touches the low
 * registers: load r0-r7 from pt_regs around the call and return the CPSR
 * with the new APSR flags merged in.  r7 (frame pointer under some ABIs)
 * is preserved in r11 across the call.
 */
static unsigned long __kprobes
t16_emulate_loregs(probes_opcode_t insn,
		   struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long oldcpsr = regs->ARM_cpsr;
	unsigned long newcpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[oldcpsr]	\n\t"
		"mov	r11, r7			\n\t"
		"ldmia	%[regs], {r0-r7}	\n\t"
		"blx	%[fn]			\n\t"
		"stmia	%[regs], {r0-r7}	\n\t"
		"mov	r7, r11			\n\t"
		"mrs	%[newcpsr], cpsr	\n\t"
		: [newcpsr] "=r" (newcpsr)
		: [oldcpsr] "r" (oldcpsr), [regs] "r" (regs),
		  [fn] "r" (asi->insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r11",
		  "lr", "memory", "cc"
		);

	return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK);
}
linux-master
arch/arm/probes/kprobes/actions-thumb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/actions-arm.c
 *
 * Copyright (C) 2006, 2007 Motorola Inc.
 */

/*
 * We do not have hardware single-stepping on ARM, This
 * effort is further complicated by the ARM not having a
 * "next PC" register.  Instructions that change the PC
 * can't be safely single-stepped in a MP environment, so
 * we have a lot of work to do:
 *
 * In the prepare phase:
 *   *) If it is an instruction that does anything
 *      with the CPU mode, we reject it for a kprobe.
 *      (This is out of laziness rather than need.  The
 *      instructions could be simulated.)
 *
 *   *) Otherwise, decode the instruction rewriting its
 *      registers to take fixed, ordered registers and
 *      setting a handler for it to run the instruction.
 *
 * In the execution phase by an instruction's handler:
 *
 *   *) If the PC is written to by the instruction, the
 *      instruction must be fully simulated in software.
 *
 *   *) Otherwise, a modified form of the instruction is
 *      directly executed.  Its handler calls the
 *      instruction in insn[0].  In insn[1] is a
 *      "mov pc, lr" to return.
 *
 *      Before calling, load up the reordered registers
 *      from the original instruction's registers.  If one
 *      of the original input registers is the PC, compute
 *      and adjust the appropriate input register.
 *
 *	After call completes, copy the output registers to
 *      the original instruction's original registers.
 *
 * We don't use a real breakpoint instruction since that
 * would have us in the kernel go from SVC mode to SVC
 * mode losing the link register.  Instead we use an
 * undefined instruction.  To simplify processing, the
 * undefined instruction used for kprobes must be reserved
 * exclusively for kprobes use.
 *
 * TODO: ifdef out some instruction decoding based on architecture.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

#include "../decode-arm.h"
#include "core.h"
#include "checkers.h"

/*
 * Call the slot copy of the instruction.  Pre-ARMv5 has no BLX, so fall
 * back to the classic mov-lr-pc / mov-pc-reg sequence.
 */
#if  __LINUX_ARM_ARCH__ >= 6
#define BLX(reg)	"blx	"reg"		\n\t"
#else
#define BLX(reg)	"mov	lr, pc		\n\t"	\
			"mov	pc, "reg"	\n\t"
#endif

/*
 * Emulate ARM LDRD/STRD.  The rewritten slot instruction uses the fixed
 * registers Rt=r0, Rt+1=r1, Rn=r2, Rm=r3; the base register is written
 * back only when the original encoding requested writeback.
 */
static void __kprobes
emulate_ldrdstrd(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc + 4;
	int rt = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rtv asm("r0") = regs->uregs[rt];
	register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
	register unsigned long rnv asm("r2") = (rn == 15) ? pc
							  : regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		BLX("%[fn]")
		: "=r" (rtv), "=r" (rt2v), "=r" (rnv)
		: "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv),
		  [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rt] = rtv;
	regs->uregs[rt+1] = rt2v;
	if (is_writeback(insn))
		regs->uregs[rn] = rnv;
}

/* Emulate ARM loads (LDR family); an LDR into PC goes via load_write_pc(). */
static void __kprobes
emulate_ldr(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc + 4;
	int rt = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rtv asm("r0");
	register unsigned long rnv asm("r2") = (rn == 15) ? pc
							  : regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		BLX("%[fn]")
		: "=r" (rtv), "=r" (rnv)
		: "1" (rnv), "r" (rmv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	if (rt == 15)
		load_write_pc(rtv, regs);
	else
		regs->uregs[rt] = rtv;

	if (is_writeback(insn))
		regs->uregs[rn] = rnv;
}

/*
 * Emulate ARM stores (STR family).  When Rt==15 the stored value must be
 * the PC of the *original* instruction plus the architectural offset
 * (str_pc_offset), not the slot's PC — hence rtpc below.
 */
static void __kprobes
emulate_str(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long rtpc = regs->ARM_pc - 4 + str_pc_offset;
	unsigned long rnpc = regs->ARM_pc + 4;
	int rt = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
							  : regs->uregs[rt];
	register unsigned long rnv asm("r2") = (rn == 15) ? rnpc
							  : regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		BLX("%[fn]")
		: "=r" (rnv)
		: "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	if (is_writeback(insn))
		regs->uregs[rn] = rnv;
}

/*
 * Emulate ARM data-processing "OP Rd, Rn, Rm{, shift Rs}" which reads and
 * writes the APSR flags.  Rd may be PC, handled via alu_write_pc().
 */
static void __kprobes
emulate_rd12rn16rm0rs8_rwflags(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	unsigned long pc = regs->ARM_pc + 4;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;
	int rs = (insn >> 8) & 0xf;

	register unsigned long rdv asm("r0") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = (rn == 15) ? pc
							  : regs->uregs[rn];
	register unsigned long rmv asm("r3") = (rm == 15) ? pc
							  : regs->uregs[rm];
	register unsigned long rsv asm("r1") = regs->uregs[rs];
	unsigned long cpsr = regs->ARM_cpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		BLX("%[fn]")
		"mrs	%[cpsr], cpsr		\n\t"
		: "=r" (rdv), [cpsr] "=r" (cpsr)
		: "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
		  "1" (cpsr), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	if (rd == 15)
		alu_write_pc(rdv, regs);
	else
		regs->uregs[rd] = rdv;
	/* Merge only the APSR flags back into the saved CPSR */
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}

/* As above but Rd/Rn/Rm may not be PC, so no special PC handling needed. */
static void __kprobes
emulate_rd12rn16rm0_rwflags_nopc(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdv asm("r0") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];
	unsigned long cpsr = regs->ARM_cpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		BLX("%[fn]")
		"mrs	%[cpsr], cpsr		\n\t"
		: "=r" (rdv), [cpsr] "=r" (cpsr)
		: "0" (rdv), "r" (rnv), "r" (rmv),
		  "1" (cpsr), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}

/* Emulate multiply-form "OP Rd, Rn, Rm, Rs" (Rd in bits 16-19), no PC. */
static void __kprobes
emulate_rd16rn12rm0rs8_rwflags_nopc(probes_opcode_t insn,
	struct arch_probes_insn *asi,
	struct pt_regs *regs)
{
	int rd = (insn >> 16) & 0xf;
	int rn = (insn >> 12) & 0xf;
	int rm = insn & 0xf;
	int rs = (insn >> 8) & 0xf;

	register unsigned long rdv asm("r2") = regs->uregs[rd];
	register unsigned long rnv asm("r0") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];
	register unsigned long rsv asm("r1") = regs->uregs[rs];
	unsigned long cpsr = regs->ARM_cpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		BLX("%[fn]")
		"mrs	%[cpsr], cpsr		\n\t"
		: "=r" (rdv), [cpsr] "=r" (cpsr)
		: "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
		  "1" (cpsr), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}

/* Emulate two-register "OP Rd, Rm" forms which touch neither PC nor flags. */
static void __kprobes
emulate_rd12rm0_noflags_nopc(probes_opcode_t insn,
	struct arch_probes_insn *asi, struct pt_regs *regs)
{
	int rd = (insn >> 12) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdv asm("r0") = regs->uregs[rd];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		BLX("%[fn]")
		: "=r" (rdv)
		: "0" (rdv), "r" (rmv), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
}

/* Emulate long multiply "OP RdLo, RdHi, Rn, Rm" with flag read/write, no PC. */
static void __kprobes
emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(probes_opcode_t insn,
	struct arch_probes_insn *asi,
	struct pt_regs *regs)
{
	int rdlo = (insn >> 12) & 0xf;
	int rdhi = (insn >> 16) & 0xf;
	int rn = insn & 0xf;
	int rm = (insn >> 8) & 0xf;

	register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
	register unsigned long rdhiv asm("r2") = regs->uregs[rdhi];
	register unsigned long rnv asm("r3") = regs->uregs[rn];
	register unsigned long rmv asm("r1") = regs->uregs[rm];
	unsigned long cpsr = regs->ARM_cpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		BLX("%[fn]")
		"mrs	%[cpsr], cpsr		\n\t"
		: "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr)
		: "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
		  "2" (cpsr), [fn] "r" (asi->insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rdlo] = rdlov;
	regs->uregs[rdhi] = rdhiv;
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}

/*
 * Dispatch table mapping each decoded ARM instruction class to its
 * simulate/emulate handler (or secondary decoder).
 */
const union decode_action kprobes_arm_actions[NUM_PROBES_ARM_ACTIONS] = {
	[PROBES_PRELOAD_IMM] = {.handler = probes_simulate_nop},
	[PROBES_PRELOAD_REG] = {.handler = probes_simulate_nop},
	[PROBES_BRANCH_IMM] = {.handler = simulate_blx1},
	[PROBES_MRS] = {.handler = simulate_mrs},
	[PROBES_BRANCH_REG] = {.handler = simulate_blx2bx},
	[PROBES_CLZ] = {.handler = emulate_rd12rm0_noflags_nopc},
	[PROBES_SATURATING_ARITHMETIC] = {
		.handler = emulate_rd12rn16rm0_rwflags_nopc},
	[PROBES_MUL1] = {.handler = emulate_rdlo12rdhi16rn0rm8_rwflags_nopc},
	[PROBES_MUL2] = {.handler = emulate_rd16rn12rm0rs8_rwflags_nopc},
	[PROBES_SWP] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
	[PROBES_LDRSTRD] = {.handler = emulate_ldrdstrd},
	[PROBES_LOAD_EXTRA] = {.handler = emulate_ldr},
	[PROBES_LOAD] = {.handler = emulate_ldr},
	[PROBES_STORE_EXTRA] = {.handler = emulate_str},
	[PROBES_STORE] = {.handler = emulate_str},
	[PROBES_MOV_IP_SP] = {.handler = simulate_mov_ipsp},
	[PROBES_DATA_PROCESSING_REG] = {
		.handler = emulate_rd12rn16rm0rs8_rwflags},
	[PROBES_DATA_PROCESSING_IMM] = {
		.handler = emulate_rd12rn16rm0rs8_rwflags},
	[PROBES_MOV_HALFWORD] = {.handler = emulate_rd12rm0_noflags_nopc},
	[PROBES_SEV] = {.handler = probes_emulate_none},
	[PROBES_WFE] = {.handler = probes_simulate_nop},
	[PROBES_SATURATE] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
	[PROBES_REV] = {.handler = emulate_rd12rm0_noflags_nopc},
	[PROBES_MMI] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
	[PROBES_PACK] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
	[PROBES_EXTEND] = {.handler = emulate_rd12rm0_noflags_nopc},
	[PROBES_EXTEND_ADD] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
	[PROBES_MUL_ADD_LONG] = {
		.handler = emulate_rdlo12rdhi16rn0rm8_rwflags_nopc},
	[PROBES_MUL_ADD] = {.handler = emulate_rd16rn12rm0rs8_rwflags_nopc},
	[PROBES_BITFIELD] = {.handler = emulate_rd12rm0_noflags_nopc},
	[PROBES_BRANCH] = {.handler = simulate_bbl},
	[PROBES_LDMSTM] = {.decoder = kprobe_decode_ldmstm}
};

/* Checkers run at decode time to reject/annotate stack- or reg-unsafe insns. */
const struct decode_checker *kprobes_arm_checkers[] = {arm_stack_checker, arm_regs_checker, NULL};
linux-master
arch/arm/probes/kprobes/actions-arm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/kernel/kprobes.c
 *
 * Kprobes on ARM
 *
 * Abhishek Sagar <[email protected]>
 * Copyright (C) 2006, 2007 Motorola Inc.
 *
 * Nicolas Pitre <[email protected]>
 * Copyright (C) 2007 Marvell Ltd.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/opcodes.h>
#include <asm/cacheflush.h>
#include <linux/percpu.h>
#include <linux/bug.h>
#include <asm/patch.h>
#include <asm/sections.h>

#include "../decode-arm.h"
#include "../decode-thumb.h"
#include "core.h"

/* Usable stack below "addr", bounded by MAX_STACK_SIZE. */
#define MIN_STACK_SIZE(addr) 				\
	min((unsigned long)MAX_STACK_SIZE,		\
	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

#define flush_insns(addr, size)				\
	flush_icache_range((unsigned long)(addr),	\
			   (unsigned long)(addr) +	\
			   (size))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/*
 * Decode the probed instruction, pick ARM vs Thumb16/Thumb32 decode tables,
 * and (when the insn needs a slot) copy the rewritten instruction into an
 * executable slot.  Returns 0, -EINVAL or -ENOMEM.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t insn;
	kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
	unsigned long addr = (unsigned long)p->addr;
	bool thumb;
	kprobe_decode_insn_t *decode_insn;
	const union decode_action *actions;
	int is;
	const struct decode_checker **checkers;

#ifdef CONFIG_THUMB2_KERNEL
	thumb = true;
	addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
	insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
	if (is_wide_instruction(insn)) {
		u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
		insn = __opcode_thumb32_compose(insn, inst2);
		decode_insn = thumb32_probes_decode_insn;
		actions = kprobes_t32_actions;
		checkers = kprobes_t32_checkers;
	} else {
		decode_insn = thumb16_probes_decode_insn;
		actions = kprobes_t16_actions;
		checkers = kprobes_t16_checkers;
	}
#else /* !CONFIG_THUMB2_KERNEL */
	thumb = false;
	if (addr & 0x3)
		return -EINVAL; /* ARM insns must be word-aligned */
	insn = __mem_to_opcode_arm(*p->addr);
	decode_insn = arm_probes_decode_insn;
	actions = kprobes_arm_actions;
	checkers = kprobes_arm_checkers;
#endif

	p->opcode = insn;
	p->ainsn.insn = tmp_insn;

	switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
	case INSN_REJECTED:	/* not supported */
		return -EINVAL;

	case INSN_GOOD:		/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		for (is = 0; is < MAX_INSN_SIZE; ++is)
			p->ainsn.insn[is] = tmp_insn[is];
		flush_insns(p->ainsn.insn,
				sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
		/* Bit 0 of insn_fn selects Thumb state when calling the slot */
		p->ainsn.insn_fn = (probes_insn_fn_t *)
					((uintptr_t)p->ainsn.insn | thumb);
		break;

	case INSN_GOOD_NO_SLOT:	/* instruction doesn't need insn slot */
		p->ainsn.insn = NULL;
		break;
	}

	/*
	 * Never instrument insn like 'str r0, [sp, +/-r1]'. Also, insn likes
	 * 'str r0, [sp, #-68]' should also be prohibited.
	 * See __und_svc.
	 */
	if ((p->ainsn.stack_space < 0) ||
			(p->ainsn.stack_space > MAX_STACK_SIZE))
		return -EINVAL;

	return 0;
}

/*
 * Arm the probe: write the architecture-appropriate breakpoint (undef)
 * instruction at the probe address, preserving the original condition
 * field for conditional ARM instructions.
 */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned int brkp;
	void *addr;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* Remove any Thumb flag */
		addr = (void *)((uintptr_t)p->addr & ~1);

		if (is_wide_instruction(p->opcode))
			brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
		else
			brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
	} else {
		kprobe_opcode_t insn = p->opcode;

		addr = p->addr;
		brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;

		if (insn >= 0xe0000000)
			brkp |= 0xe0000000;  /* Unconditional instruction */
		else
			brkp |= insn & 0xf0000000;  /* Copy condition from insn */
	}

	patch_text(addr, brkp);
}

/*
 * The actual disarming is done here on each CPU and synchronized using
 * stop_machine. This synchronization is necessary on SMP to avoid removing
 * a probe between the moment the 'Undefined Instruction' exception is raised
 * and the moment the exception handler reads the faulting instruction from
 * memory. It is also needed to atomically set the two half-words of a 32-bit
 * Thumb breakpoint.
 */
struct patch {
	void *addr;
	unsigned int insn;
};

/* stop_machine() callback: restore the original insn at p->addr. */
static int __kprobes_remove_breakpoint(void *data)
{
	struct patch *p = data;
	__patch_text(p->addr, p->insn);
	return 0;
}

void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
{
	struct patch p = {
		.addr = addr,
		.insn = insn,
	};
	stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
				cpu_online_mask);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
			p->opcode);
}

/* Release the instruction slot (if any) allocated in arch_prepare_kprobe(). */
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

/* Stash the currently-running kprobe so a reentrant hit can restore it. */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Skip the probed instruction entirely (its condition check failed):
 * advance the IT state and the PC by the instruction's size.
 */
static void __kprobes
singlestep_skip(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_THUMB2_KERNEL
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
	if (is_wide_instruction(p->opcode))
		regs->ARM_pc += 4;
	else
		regs->ARM_pc += 2;
#else
	regs->ARM_pc += 4;
#endif
}

static inline void __kprobes
singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
}

/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete.  The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * First look for a probe which was registered using an address with
	 * bit 0 set, this is the usual situation for pointers to Thumb code.
	 * If not found, fallback to looking for one with bit 0 clear.
	 */
	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
	if (!p)
		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

	if (p) {
		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
			/*
			 * Probe hit but conditional execution check failed,
			 * so just skip the instruction and continue as if
			 * nothing had happened.
			 * In this case, we can skip recursing check too.
			 */
			singlestep_skip(p, regs);
		} else if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
			case KPROBE_HIT_SS:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			case KPROBE_REENTER:
				/* A nested probe was hit in FIQ, it is a BUG */
				pr_warn("Failed to recover from reentered kprobes.\n");
				dump_kprobe(p);
				fallthrough;
			default:
				/* impossible cases */
				BUG();
			}
		} else {
			/* Probe hit and conditional execution check ok. */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and no need to single
			 * stepping. Let's just reset current kprobe and exit.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
			}
			reset_current_kprobe();
		}
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it.  Let's restart
		 * the instruction.  By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}

/* Undef-instruction hook entry: run the kprobe handler with IRQs masked. */
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned long flags;
	local_irq_save(flags);
	kprobe_handler(regs);
	local_irq_restore(flags);
	return 0;
}

/*
 * Recover from a page fault raised while single-stepping: rewind the PC to
 * the probe address and let the normal fault path handle it.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the PC to point back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ARM_pc = (long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			reset_current_kprobe();
		}
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	/*
	 * notify_die() is currently never called on ARM,
	 * so this callback is currently empty.
	 */
	return NOTIFY_DONE;
}

/*
 * When a retprobed function returns, trampoline_handler() is called,
 * calling the kretprobe's handler. We construct a struct pt_regs to
 * give a view of registers r0-r11, sp, lr, and pc to the user
 * return-handler. This is not a complete pt_regs structure, but that
 * should be enough for stacktrace from the return handler with or
 * without pt_regs.
 */
void __naked __kprobes __kretprobe_trampoline(void)
{
	__asm__ __volatile__ (
#ifdef CONFIG_FRAME_POINTER
		"ldr	lr, =__kretprobe_trampoline	\n\t"
	/* __kretprobe_trampoline makes a framepointer on pt_regs. */
#ifdef CONFIG_CC_IS_CLANG
		"stmdb	sp, {sp, lr, pc}	\n\t"
		"sub	sp, sp, #12		\n\t"
		/* In clang case, pt_regs->ip = lr. */
		"stmdb	sp!, {r0 - r11, lr}	\n\t"
		/* fp points regs->r11 (fp) */
		"add	fp, sp,	#44		\n\t"
#else /* !CONFIG_CC_IS_CLANG */
		/* In gcc case, pt_regs->ip = fp. */
		"stmdb	sp, {fp, sp, lr, pc}	\n\t"
		"sub	sp, sp, #16		\n\t"
		"stmdb	sp!, {r0 - r11}		\n\t"
		/* fp points regs->r15 (pc) */
		"add	fp, sp, #60		\n\t"
#endif /* CONFIG_CC_IS_CLANG */
#else /* !CONFIG_FRAME_POINTER */
		"sub	sp, sp, #16		\n\t"
		"stmdb	sp!, {r0 - r11}		\n\t"
#endif /* CONFIG_FRAME_POINTER */
		"mov	r0, sp			\n\t"
		"bl	trampoline_handler	\n\t"
		"mov	lr, r0			\n\t"
		"ldmia	sp!, {r0 - r11}		\n\t"
		"add	sp, sp, #16		\n\t"
#ifdef CONFIG_THUMB2_KERNEL
		"bx	lr			\n\t"
#else
		"mov	pc, lr			\n\t"
#endif
		: : : "memory");
}

/* Called from __kretprobe_trampoline */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, (void *)regs->ARM_fp);
}

/* Record the real return address and divert LR to the trampoline. */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
	ri->fp = (void *)regs->ARM_fp;

	/* Replace the return addr with trampoline addr. */
	regs->ARM_lr = (unsigned long)&__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

#ifdef CONFIG_THUMB2_KERNEL

static struct undef_hook kprobes_thumb16_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= kprobe_trap_handler,
};

static struct undef_hook kprobes_thumb32_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= kprobe_trap_handler,
};

#else  /* !CONFIG_THUMB2_KERNEL */

static struct undef_hook kprobes_arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= KPROBE_ARM_BREAKPOINT_INSTRUCTION,
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= kprobe_trap_handler,
};

#endif /* !CONFIG_THUMB2_KERNEL */

/* Register the undef-instruction hooks that deliver kprobe breakpoints. */
int __init arch_init_kprobes(void)
{
	arm_probes_decode_init();
#ifdef CONFIG_THUMB2_KERNEL
	register_undef_hook(&kprobes_thumb16_break_hook);
	register_undef_hook(&kprobes_thumb32_break_hook);
#else
	register_undef_hook(&kprobes_arm_break_hook);
#endif
	return 0;
}

/* Forbid probing IRQ-entry, exception-entry, idmap and __kprobes text. */
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	void *a = (void *)addr;

	return __in_irqentry_text(addr) ||
	       in_entry_text(addr) ||
	       in_idmap_text(addr) ||
	       memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
}
linux-master
arch/arm/probes/kprobes/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/test-thumb.c
 *
 * Copyright (C) 2011 Jon Medhurst <[email protected]>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/opcodes.h>
#include <asm/probes.h>

#include "test-core.h"

/* Instruction-set tag reported by the test harness. */
#define TEST_ISA "16"

/*
 * Run 'tests' with the harness forbidden from executing the test case
 * inside an IT block.
 */
#define DONT_TEST_IN_ITBLOCK(tests)			\
	kprobe_test_flags |= TEST_FLAG_NO_ITBLOCK;	\
	tests						\
	kprobe_test_flags &= ~TEST_FLAG_NO_ITBLOCK;

/*
 * Run 'tests' for instructions whose condition-code field starts at
 * bit position 'cc_pos'; implies DONT_TEST_IN_ITBLOCK.
 */
#define CONDITION_INSTRUCTIONS(cc_pos, tests)		\
	kprobe_test_cc_position = cc_pos;		\
	DONT_TEST_IN_ITBLOCK(tests)			\
	kprobe_test_cc_position = 0;

/* Emit a test case which executes 'code' inside a full IT block. */
#define TEST_ITBLOCK(code)				\
	kprobe_test_flags |= TEST_FLAG_FULL_ITBLOCK;	\
	TESTCASE_START(code)				\
	TEST_ARG_END("")				\
	"50: nop \n\t"					\
	"1: "code" \n\t"				\
	" mov r1, #0x11 \n\t"				\
	" mov r2, #0x22 \n\t"				\
	" mov r3, #0x33 \n\t"				\
	"2: nop \n\t"					\
	TESTCASE_END					\
	kprobe_test_flags &= ~TEST_FLAG_FULL_ITBLOCK;

/*
 * Test a Thumb instruction that interworks to ARM: 'code1 #reg code2'
 * must branch to the ARM stub at label 3, which then returns to Thumb
 * code at label 2.
 */
#define TEST_THUMB_TO_ARM_INTERWORK_P(code1, reg, val, code2)	\
	TESTCASE_START(code1 #reg code2)	\
	TEST_ARG_PTR(reg, val)			\
	TEST_ARG_REG(14, 99f+1)			\
	TEST_ARG_MEM(15, 3f)			\
	TEST_ARG_END("")			\
	" nop \n\t" /* To align 1f */		\
	"50: nop \n\t"				\
	"1: "code1 #reg code2" \n\t"		\
	" bx lr \n\t"				\
	".arm \n\t"				\
	"3: adr lr, 2f+1 \n\t"			\
	" bx lr \n\t"				\
	".thumb \n\t"				\
	"2: nop \n\t"				\
	TESTCASE_END

/*
 * Exercise the kprobes decoder across the 16-bit Thumb instruction
 * classes.  Each TEST_* macro generates and runs a probed test case
 * for one encoding; TEST_UNSUPPORTED checks that encodings which
 * cannot safely be probed are rejected by the decoder.
 */
void kprobe_thumb16_test_cases(void)
{
	kprobe_test_flags = TEST_FLAG_NARROW_INSTR;

	TEST_GROUP("Shift (immediate), add, subtract, move, and compare")

	TEST_R( "lsls r7, r",0,VAL1,", #5")
	TEST_R( "lsls r0, r",7,VAL2,", #11")
	TEST_R( "lsrs r7, r",0,VAL1,", #5")
	TEST_R( "lsrs r0, r",7,VAL2,", #11")
	TEST_R( "asrs r7, r",0,VAL1,", #5")
	TEST_R( "asrs r0, r",7,VAL2,", #11")
	TEST_RR( "adds r2, r",0,VAL1,", r",7,VAL2,"")
	TEST_RR( "adds r5, r",7,VAL2,", r",0,VAL2,"")
	TEST_RR( "subs r2, r",0,VAL1,", r",7,VAL2,"")
	TEST_RR( "subs r5, r",7,VAL2,", r",0,VAL2,"")
	TEST_R( "adds r7, r",0,VAL1,", #5")
	TEST_R( "adds r0, r",7,VAL2,", #2")
	TEST_R( "subs r7, r",0,VAL1,", #5")
	TEST_R( "subs r0, r",7,VAL2,", #2")
	TEST( "movs.n r0, #0x5f")
	TEST( "movs.n r7, #0xa0")
	TEST_R( "cmp.n r",0,0x5e, ", #0x5f")
	TEST_R( "cmp.n r",5,0x15f,", #0x5f")
	TEST_R( "cmp.n r",7,0xa0, ", #0xa0")
	TEST_R( "adds.n r",0,VAL1,", #0x5f")
	TEST_R( "adds.n r",7,VAL2,", #0xa0")
	TEST_R( "subs.n r",0,VAL1,", #0x5f")
	TEST_R( "subs.n r",7,VAL2,", #0xa0")

	TEST_GROUP("16-bit Thumb data-processing instructions")

/* Pair of register/register tests for one data-processing mnemonic. */
#define DATA_PROCESSING16(op,val)		\
	TEST_RR( op" r",0,VAL1,", r",7,val,"")	\
	TEST_RR( op" r",7,VAL2,", r",0,val,"")

	DATA_PROCESSING16("ands",0xf00f00ff)
	DATA_PROCESSING16("eors",0xf00f00ff)
	DATA_PROCESSING16("lsls",11)
	DATA_PROCESSING16("lsrs",11)
	DATA_PROCESSING16("asrs",11)
	DATA_PROCESSING16("adcs",VAL2)
	DATA_PROCESSING16("sbcs",VAL2)
	DATA_PROCESSING16("rors",11)
	DATA_PROCESSING16("tst",0xf00f00ff)
	TEST_R("rsbs r",0,VAL1,", #0")
	TEST_R("rsbs r",7,VAL2,", #0")
	DATA_PROCESSING16("cmp",0xf00f00ff)
	DATA_PROCESSING16("cmn",0xf00f00ff)
	DATA_PROCESSING16("orrs",0xf00f00ff)
	DATA_PROCESSING16("muls",VAL2)
	DATA_PROCESSING16("bics",0xf00f00ff)
	DATA_PROCESSING16("mvns",VAL2)

	TEST_GROUP("Special data instructions and branch and exchange")

	TEST_RR( "add r",0, VAL1,", r",7,VAL2,"")
	TEST_RR( "add r",3, VAL2,", r",8,VAL3,"")
	TEST_RR( "add r",8, VAL3,", r",0,VAL1,"")
	TEST_R( "add sp" ", r",8,-8, "")
	TEST_R( "add r",14,VAL1,", pc")
	TEST_BF_R("add pc" ", r",0,2f-1f-8,"")
	TEST_UNSUPPORTED(__inst_thumb16(0x44ff) " @ add pc, pc")
	TEST_RR( "cmp r",3,VAL1,", r",8,VAL2,"")
	TEST_RR( "cmp r",8,VAL2,", r",0,VAL1,"")
	TEST_R( "cmp sp" ", r",8,-8, "")
	TEST_R( "mov r0, r",7,VAL2,"")
	TEST_R( "mov r3, r",8,VAL3,"")
	TEST_R( "mov r8, r",0,VAL1,"")
	TEST_P( "mov sp, r",8,-8, "")
	TEST( "mov lr, pc")
	TEST_BF_R("mov pc, r",0,2f, "")
	TEST_BF_R("bx r",0, 2f+1,"")
	TEST_BF_R("bx r",14,2f+1,"")
	/* Hand-built case: "bx pc" needs ARM code at the target. */
	TESTCASE_START("bx pc")
	TEST_ARG_REG(14, 99f+1)
	TEST_ARG_END("")
	" nop \n\t" /* To align the bx pc*/
	"50: nop \n\t"
	"1: bx pc \n\t"
	" bx lr \n\t"
	".arm \n\t"
	" adr lr, 2f+1 \n\t"
	" bx lr \n\t"
	".thumb \n\t"
	"2: nop \n\t"
	TESTCASE_END
	TEST_BF_R("blx r",0, 2f+1,"")
	TEST_BB_R("blx r",14,2f+1,"")
	TEST_UNSUPPORTED(__inst_thumb16(0x47f8) " @ blx pc")

	TEST_GROUP("Load from Literal Pool")

	TEST_X( "ldr r0, 3f",
		".align \n\t"
		"3: .word "__stringify(VAL1))
	TEST_X( "ldr r7, 3f",
		".space 128 \n\t"
		".align \n\t"
		"3: .word "__stringify(VAL2))

	TEST_GROUP("16-bit Thumb Load/store instructions")

	TEST_RPR("str r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
	TEST_RPR("str r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
	TEST_RPR("strh r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
	TEST_RPR("strh r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
	TEST_RPR("strb r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
	TEST_RPR("strb r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
	TEST_PR( "ldrsb r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrsb r7, [r",6, 24,", r",5, 50,"]")
	TEST_PR( "ldr r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldr r7, [r",6, 24,", r",5, 48,"]")
	TEST_PR( "ldrh r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrh r7, [r",6, 24,", r",5, 50,"]")
	TEST_PR( "ldrb r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrb r7, [r",6, 24,", r",5, 50,"]")
	TEST_PR( "ldrsh r0, [r",1, 24,", r",2, 48,"]")
	TEST_PR( "ldrsh r7, [r",6, 24,", r",5, 50,"]")
	TEST_RP("str r",0, VAL1,", [r",1, 24,", #120]")
	TEST_RP("str r",7, VAL2,", [r",6, 24,", #120]")
	TEST_P( "ldr r0, [r",1, 24,", #120]")
	TEST_P( "ldr r7, [r",6, 24,", #120]")
	TEST_RP("strb r",0, VAL1,", [r",1, 24,", #30]")
	TEST_RP("strb r",7, VAL2,", [r",6, 24,", #30]")
	TEST_P( "ldrb r0, [r",1, 24,", #30]")
	TEST_P( "ldrb r7, [r",6, 24,", #30]")
	TEST_RP("strh r",0, VAL1,", [r",1, 24,", #60]")
	TEST_RP("strh r",7, VAL2,", [r",6, 24,", #60]")
	TEST_P( "ldrh r0, [r",1, 24,", #60]")
	TEST_P( "ldrh r7, [r",6, 24,", #60]")
	TEST_R( "str r",0, VAL1,", [sp, #0]")
	TEST_R( "str r",7, VAL2,", [sp, #160]")
	TEST( "ldr r0, [sp, #0]")
	TEST( "ldr r7, [sp, #160]")
	TEST_RP("str r",0, VAL1,", [r",0, 24,"]")
	TEST_P( "ldr r0, [r",0, 24,"]")

	TEST_GROUP("Generate PC-/SP-relative address")

	TEST("add r0, pc, #4")
	TEST("add r7, pc, #1020")
	TEST("add r0, sp, #4")
	TEST("add r7, sp, #1020")

	TEST_GROUP("Miscellaneous 16-bit instructions")

	TEST_UNSUPPORTED( "cpsie i")
	TEST_UNSUPPORTED( "cpsid i")
	TEST_UNSUPPORTED( "setend le")
	TEST_UNSUPPORTED( "setend be")

	TEST("add sp, #"__stringify(TEST_MEMORY_SIZE)) /* Assumes TEST_MEMORY_SIZE < 0x400 */
	TEST("sub sp, #0x7f*4")

	/* cbz/cbnz are unpredictable in an IT block. */
	DONT_TEST_IN_ITBLOCK(
	TEST_BF_R( "cbnz r",0,0, ", 2f")
	TEST_BF_R( "cbz r",2,-1,", 2f")
	TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
	TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
	)
	TEST_R("sxth r0, r",7, HH1,"")
	TEST_R("sxth r7, r",0, HH2,"")
	TEST_R("sxtb r0, r",7, HH1,"")
	TEST_R("sxtb r7, r",0, HH2,"")
	TEST_R("uxth r0, r",7, HH1,"")
	TEST_R("uxth r7, r",0, HH2,"")
	TEST_R("uxtb r0, r",7, HH1,"")
	TEST_R("uxtb r7, r",0, HH2,"")
	TEST_R("rev r0, r",7, VAL1,"")
	TEST_R("rev r7, r",0, VAL2,"")
	TEST_R("rev16 r0, r",7, VAL1,"")
	TEST_R("rev16 r7, r",0, VAL2,"")
	TEST_UNSUPPORTED(__inst_thumb16(0xba80) "")
	TEST_UNSUPPORTED(__inst_thumb16(0xbabf) "")
	TEST_R("revsh r0, r",7, VAL1,"")
	TEST_R("revsh r7, r",0, VAL2,"")

/* Test case for a pop that loads pc: sp is preset 'offset' below memory end. */
#define TEST_POPPC(code, offset)	\
	TESTCASE_START(code)		\
	TEST_ARG_PTR(13, offset)	\
	TEST_ARG_END("")		\
	TEST_BRANCH_F(code)		\
	TESTCASE_END

	TEST("push {r0}")
	TEST("push {r7}")
	TEST("push {r14}")
	TEST("push {r0-r7,r14}")
	TEST("push {r0,r2,r4,r6,r14}")
	TEST("push {r1,r3,r5,r7}")
	TEST("pop {r0}")
	TEST("pop {r7}")
	TEST("pop {r0,r2,r4,r6}")
	TEST_POPPC("pop {pc}",15*4)
	TEST_POPPC("pop {r0-r7,pc}",7*4)
	TEST_POPPC("pop {r1,r3,r5,r7,pc}",11*4)
	TEST_THUMB_TO_ARM_INTERWORK_P("pop {pc} @ ",13,15*4,"")
	TEST_THUMB_TO_ARM_INTERWORK_P("pop {r0-r7,pc} @ ",13,7*4,"")

	TEST_UNSUPPORTED("bkpt.n 0")
	TEST_UNSUPPORTED("bkpt.n 255")

	TEST_SUPPORTED("yield")
	TEST("sev")
	TEST("nop")
	TEST("wfi")
	TEST_SUPPORTED("wfe")
	TEST_UNSUPPORTED(__inst_thumb16(0xbf50) "") /* Unassigned hints */
	TEST_UNSUPPORTED(__inst_thumb16(0xbff0) "") /* Unassigned hints */

/* Emit a test case probing an IT instruction followed by block 'code2'. */
#define TEST_IT(code, code2)	\
	TESTCASE_START(code)	\
	TEST_ARG_END("")	\
	"50: nop \n\t"		\
	"1: "code" \n\t"	\
	" "code2" \n\t"		\
	"2: nop \n\t"		\
	TESTCASE_END

	DONT_TEST_IN_ITBLOCK(
	TEST_IT("it eq","moveq r0,#0")
TEST_IT("it vc","movvc r0,#0") TEST_IT("it le","movle r0,#0") TEST_IT("ite eq","moveq r0,#0\n\t movne r1,#1") TEST_IT("itet vc","movvc r0,#0\n\t movvs r1,#1\n\t movvc r2,#2") TEST_IT("itete le","movle r0,#0\n\t movgt r1,#1\n\t movle r2,#2\n\t movgt r3,#3") TEST_IT("itttt le","movle r0,#0\n\t movle r1,#1\n\t movle r2,#2\n\t movle r3,#3") TEST_IT("iteee le","movle r0,#0\n\t movgt r1,#1\n\t movgt r2,#2\n\t movgt r3,#3") ) TEST_GROUP("Load and store multiple") TEST_P("ldmia r",4, 16*4,"!, {r0,r7}") TEST_P("ldmia r",7, 16*4,"!, {r0-r6}") TEST_P("stmia r",4, 16*4,"!, {r0,r7}") TEST_P("stmia r",0, 16*4,"!, {r0-r7}") TEST_GROUP("Conditional branch and Supervisor Call instructions") CONDITION_INSTRUCTIONS(8, TEST_BF("beq 2f") TEST_BB("bne 2b") TEST_BF("bgt 2f") TEST_BB("blt 2b") ) TEST_UNSUPPORTED(__inst_thumb16(0xde00) "") TEST_UNSUPPORTED(__inst_thumb16(0xdeff) "") TEST_UNSUPPORTED("svc #0x00") TEST_UNSUPPORTED("svc #0xff") TEST_GROUP("Unconditional branch") TEST_BF( "b 2f") TEST_BB( "b 2b") TEST_BF_X("b 2f", SPACE_0x400) TEST_BB_X("b 2b", SPACE_0x400) TEST_GROUP("Testing instructions in IT blocks") TEST_ITBLOCK("subs.n r0, r0") verbose("\n"); } void kprobe_thumb32_test_cases(void) { kprobe_test_flags = 0; TEST_GROUP("Load/store multiple") TEST_UNSUPPORTED("rfedb sp") TEST_UNSUPPORTED("rfeia sp") TEST_UNSUPPORTED("rfedb sp!") TEST_UNSUPPORTED("rfeia sp!") TEST_P( "stmia r",0, 16*4,", {r0,r8}") TEST_P( "stmia r",4, 16*4,", {r0-r12,r14}") TEST_P( "stmia r",7, 16*4,"!, {r8-r12,r14}") TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "ldmia r",0, 16*4,", {r0,r8}") TEST_P( "ldmia r",4, 0, ", {r0-r12,r14}") TEST_BF_P("ldmia r",5, 8*4, "!, {r6-r12,r15}") TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmia r",14,14*4,"!, {r4,pc}") TEST_P( "stmdb r",0, 16*4,", {r0,r8}") TEST_P( "stmdb r",4, 16*4,", {r0-r12,r14}") TEST_P( "stmdb r",5, 16*4,"!, {r8-r12,r14}") TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "ldmdb r",0, 16*4,", 
{r0,r8}") TEST_P( "ldmdb r",4, 16*4,", {r0-r12,r14}") TEST_BF_P("ldmdb r",5, 16*4,"!, {r6-r12,r15}") TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmdb r",14,16*4,"!, {r4,pc}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12}") TEST_P( "stmdb r",2, 16*4,", {r3-r12,lr}") TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}") TEST_P( "stmdb r",0, 16*4,", {r0-r12}") TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}") TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}") TEST_P( "ldmia r",13,5*4, "!, {r3-r12}") TEST_BF_P("ldmia r",2, 5*4, "!, {r3-r12,pc}") TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}") TEST_P( "ldmia r",0, 16*4,", {r0-r12}") TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}") TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",0,14*4,", {r12,pc}") TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",13,2*4,", {r0-r12,pc}") TEST_UNSUPPORTED(__inst_thumb32(0xe88f0101) " @ stmia pc, {r0,r8}") TEST_UNSUPPORTED(__inst_thumb32(0xe92f5f00) " @ stmdb pc!, {r8-r12,r14}") TEST_UNSUPPORTED(__inst_thumb32(0xe8bdc000) " @ ldmia r13!, {r14,pc}") TEST_UNSUPPORTED(__inst_thumb32(0xe93ec000) " @ ldmdb r14!, {r14,pc}") TEST_UNSUPPORTED(__inst_thumb32(0xe8a73f00) " @ stmia r7!, {r8-r12,sp}") TEST_UNSUPPORTED(__inst_thumb32(0xe8a79f00) " @ stmia r7!, {r8-r12,pc}") TEST_UNSUPPORTED(__inst_thumb32(0xe93e2010) " @ ldmdb r14!, {r4,sp}") TEST_GROUP("Load/store double or exclusive, table branch") TEST_P( "ldrd r0, r1, [r",1, 24,", #-16]") TEST( "ldrd r12, r14, [sp, #16]") TEST_P( "ldrd r1, r0, [r",7, 24,", #-16]!") TEST( "ldrd r14, r12, [sp, #16]!") TEST_P( "ldrd r1, r0, [r",7, 24,"], #16") TEST( "ldrd r7, r8, [sp], #-16") TEST_X( "ldrd r12, r14, 3f", ".align 3 \n\t" "3: .word "__stringify(VAL1)" \n\t" " .word "__stringify(VAL2)) TEST_UNSUPPORTED(__inst_thumb32(0xe9ffec04) " @ ldrd r14, r12, [pc, #16]!") TEST_UNSUPPORTED(__inst_thumb32(0xe8ffec04) " @ ldrd r14, r12, [pc], #16") TEST_UNSUPPORTED(__inst_thumb32(0xe9d4d800) " @ ldrd sp, r8, [r4]") 
TEST_UNSUPPORTED(__inst_thumb32(0xe9d4f800) " @ ldrd pc, r8, [r4]") TEST_UNSUPPORTED(__inst_thumb32(0xe9d47d00) " @ ldrd r7, sp, [r4]") TEST_UNSUPPORTED(__inst_thumb32(0xe9d47f00) " @ ldrd r7, pc, [r4]") TEST_RRP("strd r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]") TEST_RR( "strd r",12,VAL2,", r",14,VAL1,", [sp, #16]") TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,", #-16]!") TEST_RR( "strd r",14,VAL2,", r",12,VAL1,", [sp, #16]!") TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16") TEST_RR( "strd r",7, VAL2,", r",8, VAL1,", [sp], #-16") TEST_RRP("strd r",6, VAL1,", r",7, VAL2,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") TEST_UNSUPPORTED("strd r6, r7, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") TEST_RRP("strd r",4, VAL1,", r",5, VAL2,", [r",14, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") TEST_UNSUPPORTED(__inst_thumb32(0xe9efec04) " @ strd r14, r12, [pc, #16]!") TEST_UNSUPPORTED(__inst_thumb32(0xe8efec04) " @ strd r14, r12, [pc], #16") TEST_RX("tbb [pc, r",0, (9f-(1f+4)),"]", "9: \n\t" ".byte (2f-1b-4)>>1 \n\t" ".byte (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RX("tbb [pc, r",4, (9f-(1f+4)+1),"]", "9: \n\t" ".byte (2f-1b-4)>>1 \n\t" ".byte (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RRX("tbb [r",1,9f,", r",2,0,"]", "9: \n\t" ".byte (2f-1b-4)>>1 \n\t" ".byte (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01f) " @ tbh [r1, pc]") TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01d) " @ tbh [r1, 
sp]") TEST_UNSUPPORTED(__inst_thumb32(0xe8ddf012) " @ tbh [sp, r2]") TEST_UNSUPPORTED("strexb r0, r1, [r2]") TEST_UNSUPPORTED("strexh r0, r1, [r2]") TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]") TEST_UNSUPPORTED("ldrexb r0, [r1]") TEST_UNSUPPORTED("ldrexh r0, [r1]") TEST_UNSUPPORTED("ldrexd r0, r1, [r1]") TEST_GROUP("Data-processing (shifted register) and (modified immediate)") #define _DATA_PROCESSING32_DNM(op,s,val) \ TEST_RR(op s".w r0, r",1, VAL1,", r",2, val, "") \ TEST_RR(op s" r1, r",1, VAL1,", r",2, val, ", lsl #3") \ TEST_RR(op s" r2, r",3, VAL1,", r",2, val, ", lsr #4") \ TEST_RR(op s" r3, r",3, VAL1,", r",2, val, ", asr #5") \ TEST_RR(op s" r4, r",5, VAL1,", r",2, N(val),", asr #6") \ TEST_RR(op s" r5, r",5, VAL1,", r",2, val, ", ror #7") \ TEST_RR(op s" r8, r",9, VAL1,", r",10,val, ", rrx") \ TEST_R( op s" r0, r",11,VAL1,", #0x00010001") \ TEST_R( op s" r11, r",0, VAL1,", #0xf5000000") \ TEST_R( op s" r7, r",8, VAL2,", #0x000af000") #define DATA_PROCESSING32_DNM(op,val) \ _DATA_PROCESSING32_DNM(op,"",val) \ _DATA_PROCESSING32_DNM(op,"s",val) #define DATA_PROCESSING32_NM(op,val) \ TEST_RR(op".w r",1, VAL1,", r",2, val, "") \ TEST_RR(op" r",1, VAL1,", r",2, val, ", lsl #3") \ TEST_RR(op" r",3, VAL1,", r",2, val, ", lsr #4") \ TEST_RR(op" r",3, VAL1,", r",2, val, ", asr #5") \ TEST_RR(op" r",5, VAL1,", r",2, N(val),", asr #6") \ TEST_RR(op" r",5, VAL1,", r",2, val, ", ror #7") \ TEST_RR(op" r",9, VAL1,", r",10,val, ", rrx") \ TEST_R( op" r",11,VAL1,", #0x00010001") \ TEST_R( op" r",0, VAL1,", #0xf5000000") \ TEST_R( op" r",8, VAL2,", #0x000af000") #define _DATA_PROCESSING32_DM(op,s,val) \ TEST_R( op s".w r0, r",14, val, "") \ TEST_R( op s" r1, r",12, val, ", lsl #3") \ TEST_R( op s" r2, r",11, val, ", lsr #4") \ TEST_R( op s" r3, r",10, val, ", asr #5") \ TEST_R( op s" r4, r",9, N(val),", asr #6") \ TEST_R( op s" r5, r",8, val, ", ror #7") \ TEST_R( op s" r8, r",7,val, ", rrx") \ TEST( op s" r0, #0x00010001") \ TEST( op s" r11, #0xf5000000") \ TEST( op s" 
r7, #0x000af000") \ TEST( op s" r4, #0x00005a00") #define DATA_PROCESSING32_DM(op,val) \ _DATA_PROCESSING32_DM(op,"",val) \ _DATA_PROCESSING32_DM(op,"s",val) DATA_PROCESSING32_DNM("and",0xf00f00ff) DATA_PROCESSING32_NM("tst",0xf00f00ff) DATA_PROCESSING32_DNM("bic",0xf00f00ff) DATA_PROCESSING32_DNM("orr",0xf00f00ff) DATA_PROCESSING32_DM("mov",VAL2) DATA_PROCESSING32_DNM("orn",0xf00f00ff) DATA_PROCESSING32_DM("mvn",VAL2) DATA_PROCESSING32_DNM("eor",0xf00f00ff) DATA_PROCESSING32_NM("teq",0xf00f00ff) DATA_PROCESSING32_DNM("add",VAL2) DATA_PROCESSING32_NM("cmn",VAL2) DATA_PROCESSING32_DNM("adc",VAL2) DATA_PROCESSING32_DNM("sbc",VAL2) DATA_PROCESSING32_DNM("sub",VAL2) DATA_PROCESSING32_NM("cmp",VAL2) DATA_PROCESSING32_DNM("rsb",VAL2) TEST_RR("pkhbt r0, r",0, HH1,", r",1, HH2,"") TEST_RR("pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2") TEST_RR("pkhtb r0, r",0, HH1,", r",1, HH2,"") TEST_RR("pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2") TEST_UNSUPPORTED(__inst_thumb32(0xea170f0d) " @ tst.w r7, sp") TEST_UNSUPPORTED(__inst_thumb32(0xea170f0f) " @ tst.w r7, pc") TEST_UNSUPPORTED(__inst_thumb32(0xea1d0f07) " @ tst.w sp, r7") TEST_UNSUPPORTED(__inst_thumb32(0xea1f0f07) " @ tst.w pc, r7") TEST_UNSUPPORTED(__inst_thumb32(0xf01d1f08) " @ tst sp, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf01f1f08) " @ tst pc, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xea970f0d) " @ teq.w r7, sp") TEST_UNSUPPORTED(__inst_thumb32(0xea970f0f) " @ teq.w r7, pc") TEST_UNSUPPORTED(__inst_thumb32(0xea9d0f07) " @ teq.w sp, r7") TEST_UNSUPPORTED(__inst_thumb32(0xea9f0f07) " @ teq.w pc, r7") TEST_UNSUPPORTED(__inst_thumb32(0xf09d1f08) " @ tst sp, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf09f1f08) " @ tst pc, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0d) " @ cmn.w r7, sp") TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0f) " @ cmn.w r7, pc") TEST_P("cmn.w sp, r",7,0,"") TEST_UNSUPPORTED(__inst_thumb32(0xeb1f0f07) " @ cmn.w pc, r7") TEST( "cmn sp, #0x00080008") 
TEST_UNSUPPORTED(__inst_thumb32(0xf11f1f08) " @ cmn pc, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0d) " @ cmp.w r7, sp") TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0f) " @ cmp.w r7, pc") TEST_P("cmp.w sp, r",7,0,"") TEST_UNSUPPORTED(__inst_thumb32(0xebbf0f07) " @ cmp.w pc, r7") TEST( "cmp sp, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf1bf1f08) " @ cmp pc, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xea5f070d) " @ movs.w r7, sp") TEST_UNSUPPORTED(__inst_thumb32(0xea5f070f) " @ movs.w r7, pc") TEST_UNSUPPORTED(__inst_thumb32(0xea5f0d07) " @ movs.w sp, r7") TEST_UNSUPPORTED(__inst_thumb32(0xea4f0f07) " @ mov.w pc, r7") TEST_UNSUPPORTED(__inst_thumb32(0xf04f1d08) " @ mov sp, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf04f1f08) " @ mov pc, #0x00080008") TEST_R("add.w r0, sp, r",1, 4,"") TEST_R("adds r0, sp, r",1, 4,", asl #3") TEST_R("add r0, sp, r",1, 4,", asl #4") TEST_R("add r0, sp, r",1, 16,", ror #1") TEST_R("add.w sp, sp, r",1, 4,"") TEST_R("add sp, sp, r",1, 4,", asl #3") TEST_UNSUPPORTED(__inst_thumb32(0xeb0d1d01) " @ add sp, sp, r1, asl #4") TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d71) " @ add sp, sp, r1, ror #1") TEST( "add.w r0, sp, #24") TEST( "add.w sp, sp, #24") TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0f01) " @ add pc, sp, r1") TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000f) " @ add r0, sp, pc") TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000d) " @ add r0, sp, sp") TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0f) " @ add sp, sp, pc") TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0d) " @ add sp, sp, sp") TEST_R("sub.w r0, sp, r",1, 4,"") TEST_R("subs r0, sp, r",1, 4,", asl #3") TEST_R("sub r0, sp, r",1, 4,", asl #4") TEST_R("sub r0, sp, r",1, 16,", ror #1") TEST_R("sub.w sp, sp, r",1, 4,"") TEST_R("sub sp, sp, r",1, 4,", asl #3") TEST_UNSUPPORTED(__inst_thumb32(0xebad1d01) " @ sub sp, sp, r1, asl #4") TEST_UNSUPPORTED(__inst_thumb32(0xebad0d71) " @ sub sp, sp, r1, ror #1") TEST_UNSUPPORTED(__inst_thumb32(0xebad0f01) " @ sub pc, sp, r1") TEST( "sub.w r0, sp, 
#24") TEST( "sub.w sp, sp, #24") TEST_UNSUPPORTED(__inst_thumb32(0xea02010f) " @ and r1, r2, pc") TEST_UNSUPPORTED(__inst_thumb32(0xea0f0103) " @ and r1, pc, r3") TEST_UNSUPPORTED(__inst_thumb32(0xea020f03) " @ and pc, r2, r3") TEST_UNSUPPORTED(__inst_thumb32(0xea02010d) " @ and r1, r2, sp") TEST_UNSUPPORTED(__inst_thumb32(0xea0d0103) " @ and r1, sp, r3") TEST_UNSUPPORTED(__inst_thumb32(0xea020d03) " @ and sp, r2, r3") TEST_UNSUPPORTED(__inst_thumb32(0xf00d1108) " @ and r1, sp, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf00f1108) " @ and r1, pc, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf0021d08) " @ and sp, r8, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf0021f08) " @ and pc, r8, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xeb02010f) " @ add r1, r2, pc") TEST_UNSUPPORTED(__inst_thumb32(0xeb0f0103) " @ add r1, pc, r3") TEST_UNSUPPORTED(__inst_thumb32(0xeb020f03) " @ add pc, r2, r3") TEST_UNSUPPORTED(__inst_thumb32(0xeb02010d) " @ add r1, r2, sp") TEST_SUPPORTED( __inst_thumb32(0xeb0d0103) " @ add r1, sp, r3") TEST_UNSUPPORTED(__inst_thumb32(0xeb020d03) " @ add sp, r2, r3") TEST_SUPPORTED( __inst_thumb32(0xf10d1108) " @ add r1, sp, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf10d1f08) " @ add pc, sp, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf10f1108) " @ add r1, pc, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf1021d08) " @ add sp, r8, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xf1021f08) " @ add pc, r8, #0x00080008") TEST_UNSUPPORTED(__inst_thumb32(0xeaa00000) "") TEST_UNSUPPORTED(__inst_thumb32(0xeaf00000) "") TEST_UNSUPPORTED(__inst_thumb32(0xeb200000) "") TEST_UNSUPPORTED(__inst_thumb32(0xeb800000) "") TEST_UNSUPPORTED(__inst_thumb32(0xebe00000) "") TEST_UNSUPPORTED(__inst_thumb32(0xf0a00000) "") TEST_UNSUPPORTED(__inst_thumb32(0xf0c00000) "") TEST_UNSUPPORTED(__inst_thumb32(0xf0f00000) "") TEST_UNSUPPORTED(__inst_thumb32(0xf1200000) "") TEST_UNSUPPORTED(__inst_thumb32(0xf1800000) "") 
TEST_UNSUPPORTED(__inst_thumb32(0xf1e00000) "") TEST_GROUP("Coprocessor instructions") TEST_UNSUPPORTED(__inst_thumb32(0xec000000) "") TEST_UNSUPPORTED(__inst_thumb32(0xeff00000) "") TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "") TEST_UNSUPPORTED(__inst_thumb32(0xfff00000) "") TEST_GROUP("Data-processing (plain binary immediate)") TEST_R("addw r0, r",1, VAL1,", #0x123") TEST( "addw r14, sp, #0xf5a") TEST( "addw sp, sp, #0x20") TEST( "addw r7, pc, #0x888") TEST_UNSUPPORTED(__inst_thumb32(0xf20f1f20) " @ addw pc, pc, #0x120") TEST_UNSUPPORTED(__inst_thumb32(0xf20d1f20) " @ addw pc, sp, #0x120") TEST_UNSUPPORTED(__inst_thumb32(0xf20f1d20) " @ addw sp, pc, #0x120") TEST_UNSUPPORTED(__inst_thumb32(0xf2001d20) " @ addw sp, r0, #0x120") TEST_R("subw r0, r",1, VAL1,", #0x123") TEST( "subw r14, sp, #0xf5a") TEST( "subw sp, sp, #0x20") TEST( "subw r7, pc, #0x888") TEST_UNSUPPORTED(__inst_thumb32(0xf2af1f20) " @ subw pc, pc, #0x120") TEST_UNSUPPORTED(__inst_thumb32(0xf2ad1f20) " @ subw pc, sp, #0x120") TEST_UNSUPPORTED(__inst_thumb32(0xf2af1d20) " @ subw sp, pc, #0x120") TEST_UNSUPPORTED(__inst_thumb32(0xf2a01d20) " @ subw sp, r0, #0x120") TEST("movw r0, #0") TEST("movw r0, #0xffff") TEST("movw lr, #0xffff") TEST_UNSUPPORTED(__inst_thumb32(0xf2400d00) " @ movw sp, #0") TEST_UNSUPPORTED(__inst_thumb32(0xf2400f00) " @ movw pc, #0") TEST_R("movt r",0, VAL1,", #0") TEST_R("movt r",0, VAL2,", #0xffff") TEST_R("movt r",14,VAL1,", #0xffff") TEST_UNSUPPORTED(__inst_thumb32(0xf2c00d00) " @ movt sp, #0") TEST_UNSUPPORTED(__inst_thumb32(0xf2c00f00) " @ movt pc, #0") TEST_R( "ssat r0, #24, r",0, VAL1,"") TEST_R( "ssat r14, #24, r",12, VAL2,"") TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8") TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8") TEST_UNSUPPORTED(__inst_thumb32(0xf30c0d17) " @ ssat sp, #24, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf30c0f17) " @ ssat pc, #24, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf30d0c17) " @ ssat r12, #24, sp") TEST_UNSUPPORTED(__inst_thumb32(0xf30f0c17) " @ 
ssat r12, #24, pc") TEST_R( "usat r0, #24, r",0, VAL1,"") TEST_R( "usat r14, #24, r",12, VAL2,"") TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8") TEST_R( "usat r14, #24, r",12, VAL2,", asr #8") TEST_UNSUPPORTED(__inst_thumb32(0xf38c0d17) " @ usat sp, #24, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf38c0f17) " @ usat pc, #24, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf38d0c17) " @ usat r12, #24, sp") TEST_UNSUPPORTED(__inst_thumb32(0xf38f0c17) " @ usat r12, #24, pc") TEST_R( "ssat16 r0, #12, r",0, HH1,"") TEST_R( "ssat16 r14, #12, r",12, HH2,"") TEST_UNSUPPORTED(__inst_thumb32(0xf32c0d0b) " @ ssat16 sp, #12, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf32c0f0b) " @ ssat16 pc, #12, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf32d0c0b) " @ ssat16 r12, #12, sp") TEST_UNSUPPORTED(__inst_thumb32(0xf32f0c0b) " @ ssat16 r12, #12, pc") TEST_R( "usat16 r0, #12, r",0, HH1,"") TEST_R( "usat16 r14, #12, r",12, HH2,"") TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0d0b) " @ usat16 sp, #12, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0f0b) " @ usat16 pc, #12, r12") TEST_UNSUPPORTED(__inst_thumb32(0xf3ad0c0b) " @ usat16 r12, #12, sp") TEST_UNSUPPORTED(__inst_thumb32(0xf3af0c0b) " @ usat16 r12, #12, pc") TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31") TEST_R( "sbfx r14, r",12, VAL2,", #8, #16") TEST_R( "sbfx r4, r",10, VAL1,", #16, #15") TEST_UNSUPPORTED(__inst_thumb32(0xf34c2d0f) " @ sbfx sp, r12, #8, #16") TEST_UNSUPPORTED(__inst_thumb32(0xf34c2f0f) " @ sbfx pc, r12, #8, #16") TEST_UNSUPPORTED(__inst_thumb32(0xf34d2c0f) " @ sbfx r12, sp, #8, #16") TEST_UNSUPPORTED(__inst_thumb32(0xf34f2c0f) " @ sbfx r12, pc, #8, #16") TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31") TEST_R( "ubfx r14, r",12, VAL2,", #8, #16") TEST_R( "ubfx r4, r",10, VAL1,", #16, #15") TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2d0f) " @ ubfx sp, r12, #8, #16") TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2f0f) " @ ubfx pc, r12, #8, #16") TEST_UNSUPPORTED(__inst_thumb32(0xf3cd2c0f) " @ ubfx r12, sp, #8, #16") TEST_UNSUPPORTED(__inst_thumb32(0xf3cf2c0f) " @ 
ubfx r12, pc, #8, #16") TEST_R( "bfc r",0, VAL1,", #4, #20") TEST_R( "bfc r",14,VAL2,", #4, #20") TEST_R( "bfc r",7, VAL1,", #0, #31") TEST_R( "bfc r",8, VAL2,", #0, #31") TEST_UNSUPPORTED(__inst_thumb32(0xf36f0d1e) " @ bfc sp, #0, #31") TEST_UNSUPPORTED(__inst_thumb32(0xf36f0f1e) " @ bfc pc, #0, #31") TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31") TEST_RR( "bfi r",12,VAL1,", r",14 , VAL2,", #4, #20") TEST_UNSUPPORTED(__inst_thumb32(0xf36e1d17) " @ bfi sp, r14, #4, #20") TEST_UNSUPPORTED(__inst_thumb32(0xf36e1f17) " @ bfi pc, r14, #4, #20") TEST_UNSUPPORTED(__inst_thumb32(0xf36d1e17) " @ bfi r14, sp, #4, #20") TEST_GROUP("Branches and miscellaneous control") CONDITION_INSTRUCTIONS(22, TEST_BF("beq.w 2f") TEST_BB("bne.w 2b") TEST_BF("bgt.w 2f") TEST_BB("blt.w 2b") TEST_BF_X("bpl.w 2f", SPACE_0x1000) ) TEST_UNSUPPORTED("msr cpsr, r0") TEST_UNSUPPORTED("msr cpsr_f, r1") TEST_UNSUPPORTED("msr spsr, r2") TEST_UNSUPPORTED("cpsie.w i") TEST_UNSUPPORTED("cpsid.w i") TEST_UNSUPPORTED("cps 0x13") TEST_SUPPORTED("yield.w") TEST("sev.w") TEST("nop.w") TEST("wfi.w") TEST_SUPPORTED("wfe.w") TEST_UNSUPPORTED("dbg.w #0") TEST_UNSUPPORTED("clrex") TEST_UNSUPPORTED("dsb") TEST_UNSUPPORTED("dmb") TEST_UNSUPPORTED("isb") TEST_UNSUPPORTED("bxj r0") TEST_UNSUPPORTED("subs pc, lr, #4") TEST_RMASKED("mrs r",0,~PSR_IGNORE_BITS,", cpsr") TEST_RMASKED("mrs r",14,~PSR_IGNORE_BITS,", cpsr") TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8d00) " @ mrs sp, spsr") TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8f00) " @ mrs pc, spsr") TEST_UNSUPPORTED("mrs r0, spsr") TEST_UNSUPPORTED("mrs lr, spsr") TEST_UNSUPPORTED(__inst_thumb32(0xf7f08000) " @ smc #0") TEST_UNSUPPORTED(__inst_thumb32(0xf7f0a000) " @ undefeined") TEST_BF( "b.w 2f") TEST_BB( "b.w 2b") TEST_BF_X("b.w 2f", SPACE_0x1000) TEST_BF( "bl.w 2f") TEST_BB( "bl.w 2b") TEST_BB_X("bl.w 2b", SPACE_0x1000) TEST_X( "blx __dummy_arm_subroutine", ".arm \n\t" ".align \n\t" ".type __dummy_arm_subroutine, %%function \n\t" "__dummy_arm_subroutine: \n\t" "mov r0, 
pc \n\t" "bx lr \n\t" ".thumb \n\t" ) TEST( "blx __dummy_arm_subroutine") TEST_GROUP("Store single data item") #define SINGLE_STORE(size) \ TEST_RP( "str"size" r",0, VAL1,", [r",11,-1024,", #1024]") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, -1024,", #1080]") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #120") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #128") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #-120") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #-128") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, ", #120]!") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, ", #128]!") \ TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]!") \ TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]!") \ TEST_RPR("str"size".w r",0, VAL1,", [r",1, 0,", r",2, 4,"]") \ TEST_RPR("str"size" r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]") \ TEST_UNSUPPORTED("str"size" r0, [r13, r1]") \ TEST_R( "str"size".w r",7, VAL1,", [sp, #24]") \ TEST_RP( "str"size".w r",0, VAL2,", [r",0,0, "]") \ TEST_RP( "str"size" r",6, VAL1,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") \ TEST_UNSUPPORTED("str"size" r6, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") \ TEST_RP( "str"size" r",4, VAL2,", [r",12, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") \ TEST_UNSUPPORTED("str"size"t r0, [r1, #4]") SINGLE_STORE("b") SINGLE_STORE("h") SINGLE_STORE("") TEST_UNSUPPORTED(__inst_thumb32(0xf801000d) " @ strb r0, [r1, r13]") TEST_UNSUPPORTED(__inst_thumb32(0xf821000d) " @ strh r0, [r1, r13]") TEST_UNSUPPORTED(__inst_thumb32(0xf841000d) " @ str r0, [r1, r13]") TEST("str sp, [sp]") TEST_UNSUPPORTED(__inst_thumb32(0xf8cfe000) " @ str r14, [pc]") TEST_UNSUPPORTED(__inst_thumb32(0xf8cef000) " @ str pc, [r14]") TEST_GROUP("Advanced SIMD element or structure load/store instructions") TEST_UNSUPPORTED(__inst_thumb32(0xf9000000) "") 
TEST_UNSUPPORTED(__inst_thumb32(0xf92fffff) "") TEST_UNSUPPORTED(__inst_thumb32(0xf9800000) "") TEST_UNSUPPORTED(__inst_thumb32(0xf9efffff) "") TEST_GROUP("Load single data item and memory hints") #define SINGLE_LOAD(size) \ TEST_P( "ldr"size" r0, [r",11,-1024, ", #1024]") \ TEST_P( "ldr"size" r14, [r",1, -1024,", #1080]") \ TEST_P( "ldr"size" r0, [r",11,256, ", #-120]") \ TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]") \ TEST_P( "ldr"size" r0, [r",11,24, "], #120") \ TEST_P( "ldr"size" r14, [r",1, 24, "], #128") \ TEST_P( "ldr"size" r0, [r",11,24, "], #-120") \ TEST_P( "ldr"size" r14, [r",1,24, "], #-128") \ TEST_P( "ldr"size" r0, [r",11,24, ", #120]!") \ TEST_P( "ldr"size" r14, [r",1, 24, ", #128]!") \ TEST_P( "ldr"size" r0, [r",11,256, ", #-120]!") \ TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]!") \ TEST_PR("ldr"size".w r0, [r",1, 0,", r",2, 4,"]") \ TEST_PR("ldr"size" r14, [r",10,0,", r",11,4,", lsl #1]") \ TEST_X( "ldr"size".w r0, 3f", \ ".align 3 \n\t" \ "3: .word "__stringify(VAL1)) \ TEST_X( "ldr"size".w r14, 3f", \ ".align 3 \n\t" \ "3: .word "__stringify(VAL2)) \ TEST( "ldr"size".w r7, 3b") \ TEST( "ldr"size".w r7, [sp, #24]") \ TEST_P( "ldr"size".w r0, [r",0,0, "]") \ TEST_UNSUPPORTED("ldr"size"t r0, [r1, #4]") SINGLE_LOAD("b") SINGLE_LOAD("sb") SINGLE_LOAD("h") SINGLE_LOAD("sh") SINGLE_LOAD("") TEST_BF_P("ldr pc, [r",14, 15*4,"]") TEST_P( "ldr sp, [r",14, 13*4,"]") TEST_BF_R("ldr pc, [sp, r",14, 15*4,"]") TEST_R( "ldr sp, [sp, r",14, 13*4,"]") TEST_THUMB_TO_ARM_INTERWORK_P("ldr pc, [r",0,0,", #15*4]") TEST_SUPPORTED("ldr sp, 99f") TEST_SUPPORTED("ldr pc, 99f") TEST_UNSUPPORTED(__inst_thumb32(0xf854700d) " @ ldr r7, [r4, sp]") TEST_UNSUPPORTED(__inst_thumb32(0xf854700f) " @ ldr r7, [r4, pc]") TEST_UNSUPPORTED(__inst_thumb32(0xf814700d) " @ ldrb r7, [r4, sp]") TEST_UNSUPPORTED(__inst_thumb32(0xf814700f) " @ ldrb r7, [r4, pc]") TEST_UNSUPPORTED(__inst_thumb32(0xf89fd004) " @ ldrb sp, 99f") TEST_UNSUPPORTED(__inst_thumb32(0xf814d008) " @ ldrb sp, [r4, 
r8]") TEST_UNSUPPORTED(__inst_thumb32(0xf894d000) " @ ldrb sp, [r4]") TEST_UNSUPPORTED(__inst_thumb32(0xf8600000) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xf9ffffff) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xf9500000) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xf95fffff) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xf8000800) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xf97ffaff) "") /* Unallocated space */ TEST( "pli [pc, #4]") TEST( "pli [pc, #-4]") TEST( "pld [pc, #4]") TEST( "pld [pc, #-4]") TEST_P( "pld [r",0,-1024,", #1024]") TEST( __inst_thumb32(0xf8b0f400) " @ pldw [r0, #1024]") TEST_P( "pli [r",4, 0b,", #1024]") TEST_P( "pld [r",7, 120,", #-120]") TEST( __inst_thumb32(0xf837fc78) " @ pldw [r7, #-120]") TEST_P( "pli [r",11,120,", #-120]") TEST( "pld [sp, #0]") TEST_PR("pld [r",7, 24, ", r",0, 16,"]") TEST_PR("pld [r",8, 24, ", r",12,16,", lsl #3]") TEST_SUPPORTED(__inst_thumb32(0xf837f000) " @ pldw [r7, r0]") TEST_SUPPORTED(__inst_thumb32(0xf838f03c) " @ pldw [r8, r12, lsl #3]"); TEST_RR("pli [r",12,0b,", r",0, 16,"]") TEST_RR("pli [r",0, 0b,", r",12,16,", lsl #3]") TEST_R( "pld [sp, r",1, 16,"]") TEST_UNSUPPORTED(__inst_thumb32(0xf817f00d) " @pld [r7, sp]") TEST_UNSUPPORTED(__inst_thumb32(0xf817f00f) " @pld [r7, pc]") TEST_GROUP("Data-processing (register)") #define SHIFTS32(op) \ TEST_RR(op" r0, r",1, VAL1,", r",2, 3, "") \ TEST_RR(op" r14, r",12,VAL2,", r",11,10,"") SHIFTS32("lsl") SHIFTS32("lsls") SHIFTS32("lsr") SHIFTS32("lsrs") SHIFTS32("asr") SHIFTS32("asrs") SHIFTS32("ror") SHIFTS32("rors") TEST_UNSUPPORTED(__inst_thumb32(0xfa01ff02) " @ lsl pc, r1, r2") TEST_UNSUPPORTED(__inst_thumb32(0xfa01fd02) " @ lsl sp, r1, r2") TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff002) " @ lsl r0, pc, r2") TEST_UNSUPPORTED(__inst_thumb32(0xfa0df002) " @ lsl r0, sp, r2") TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00f) " @ lsl r0, r1, pc") TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00d) " @ lsl 
r0, r1, sp") TEST_RR( "sxtah r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxth r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_thumb32(0xfa0fff87) " @ sxth pc, r7"); TEST_UNSUPPORTED(__inst_thumb32(0xfa0ffd87) " @ sxth sp, r7"); TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88f) " @ sxth r8, pc"); TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88d) " @ sxth r8, sp"); TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxth r8, r",7, HH1,"") TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxtb16 r8, r",7, HH1,"") TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxtb16 r8, r",7, HH1,"") TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxtb r8, r",7, HH1,"") TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxtb r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_thumb32(0xfa6000f0) "") TEST_UNSUPPORTED(__inst_thumb32(0xfa7fffff) "") #define PARALLEL_ADD_SUB(op) \ TEST_RR( op"add16 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"add16 r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"asx r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"asx r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"sax r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"sax r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"sub16 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"sub16 r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"add8 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"add8 r14, r",12,HH2,", r",10,HH1,"") \ TEST_RR( op"sub8 r0, r",0, HH1,", r",1, HH2,"") \ TEST_RR( op"sub8 r14, r",12,HH2,", r",10,HH1,"") TEST_GROUP("Parallel addition and subtraction, signed") PARALLEL_ADD_SUB("s") PARALLEL_ADD_SUB("q") PARALLEL_ADD_SUB("sh") TEST_GROUP("Parallel addition and subtraction, unsigned") 
PARALLEL_ADD_SUB("u") PARALLEL_ADD_SUB("uq") PARALLEL_ADD_SUB("uh") TEST_GROUP("Miscellaneous operations") TEST_RR("qadd r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR("qadd lr, r",9, VAL2,", r",8, VAL1,"") TEST_RR("qsub r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR("qsub lr, r",9, VAL2,", r",8, VAL1,"") TEST_RR("qdadd r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR("qdadd lr, r",9, VAL2,", r",8, VAL1,"") TEST_RR("qdsub r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR("qdsub lr, r",9, VAL2,", r",8, VAL1,"") TEST_R("rev.w r0, r",0, VAL1,"") TEST_R("rev r14, r",12, VAL2,"") TEST_R("rev16.w r0, r",0, VAL1,"") TEST_R("rev16 r14, r",12, VAL2,"") TEST_R("rbit r0, r",0, VAL1,"") TEST_R("rbit r14, r",12, VAL2,"") TEST_R("revsh.w r0, r",0, VAL1,"") TEST_R("revsh r14, r",12, VAL2,"") TEST_UNSUPPORTED(__inst_thumb32(0xfa9cff8c) " @ rev pc, r12"); TEST_UNSUPPORTED(__inst_thumb32(0xfa9cfd8c) " @ rev sp, r12"); TEST_UNSUPPORTED(__inst_thumb32(0xfa9ffe8f) " @ rev r14, pc"); TEST_UNSUPPORTED(__inst_thumb32(0xfa9dfe8d) " @ rev r14, sp"); TEST_RR("sel r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR("sel r14, r",12,VAL1,", r",10, VAL2,"") TEST_R("clz r0, r",0, 0x0,"") TEST_R("clz r7, r",14,0x1,"") TEST_R("clz lr, r",7, 0xffffffff,"") TEST_UNSUPPORTED(__inst_thumb32(0xfa80f030) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfab0f000) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */ TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations") TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "mul r7, r",8, VAL2,", r",9, VAL2,"") TEST_UNSUPPORTED(__inst_thumb32(0xfb08ff09) " @ mul pc, r8, r9") TEST_UNSUPPORTED(__inst_thumb32(0xfb08fd09) " @ mul sp, r8, r9") TEST_UNSUPPORTED(__inst_thumb32(0xfb0ff709) " @ mul r7, pc, r9") TEST_UNSUPPORTED(__inst_thumb32(0xfb0df709) " @ mul r7, sp, r9") TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70f) " @ mul r7, r8, pc") 
TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70d) " @ mul r7, r8, sp") TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mla r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_UNSUPPORTED(__inst_thumb32(0xfb08af09) " @ mla pc, r8, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb08ad09) " @ mla sp, r8, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb0fa709) " @ mla r7, pc, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb0da709) " @ mla r7, sp, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70f) " @ mla r7, r8, pc, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70d) " @ mla r7, r8, sp, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb08d709) " @ mla r7, r8, r9, sp"); TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mls r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlabb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlatb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlabt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlatt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulbb r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smultb r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulbt r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smultt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smultt r7, r",8, VAL3,", r",9, VAL1,"") TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RR( 
"smuad r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"") TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"") TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlawb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlawt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulwb r7, r",8, VAL3,", r",9, VAL1,"") TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulwt r7, r",8, VAL3,", r",9, VAL1,"") TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"") TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"") TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"") TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"") TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"") TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"") TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( 
"usad8 r14, r",12,VAL2,", r",10,VAL1,"") TEST_UNSUPPORTED(__inst_thumb32(0xfb00f010) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfb0fff1f) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfb70f010) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfb700010) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */ TEST_GROUP("Long multiply, long multiply accumulate, and divide") TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "smull r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_UNSUPPORTED(__inst_thumb32(0xfb89f80a) " @ smull pc, r8, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb89d80a) " @ smull sp, r8, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb897f0a) " @ smull r7, pc, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb897d0a) " @ smull r7, sp, r9, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb8f780a) " @ smull r7, r8, pc, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb8d780a) " @ smull r7, r8, sp, r10"); TEST_UNSUPPORTED(__inst_thumb32(0xfb89780f) " @ smull r7, r8, r9, pc"); TEST_UNSUPPORTED(__inst_thumb32(0xfb89780d) " @ smull r7, r8, r9, sp"); TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "umull r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlaltb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) 
TEST_RRRR( "smlaltt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlsld r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlsld r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "smlsldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlsldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRRR( "umaal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umaal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_GROUP("Coprocessor instructions") TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "") TEST_UNSUPPORTED(__inst_thumb32(0xffffffff) "") TEST_GROUP("Testing instructions in IT blocks") TEST_ITBLOCK("sub.w r0, r0") verbose("\n"); }
linux-master
arch/arm/probes/kprobes/test-thumb.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/kernel/kprobes-test-arm.c * * Copyright (C) 2011 Jon Medhurst <[email protected]>. */ #include <linux/kernel.h> #include <linux/module.h> #include <asm/system_info.h> #include <asm/opcodes.h> #include <asm/probes.h> #include "test-core.h" #define TEST_ISA "32" #define TEST_ARM_TO_THUMB_INTERWORK_R(code1, reg, val, code2) \ TESTCASE_START(code1 #reg code2) \ TEST_ARG_REG(reg, val) \ TEST_ARG_REG(14, 99f) \ TEST_ARG_END("") \ "50: nop \n\t" \ "1: "code1 #reg code2" \n\t" \ " bx lr \n\t" \ ".thumb \n\t" \ "3: adr lr, 2f \n\t" \ " bx lr \n\t" \ ".arm \n\t" \ "2: nop \n\t" \ TESTCASE_END #define TEST_ARM_TO_THUMB_INTERWORK_P(code1, reg, val, code2) \ TESTCASE_START(code1 #reg code2) \ TEST_ARG_PTR(reg, val) \ TEST_ARG_REG(14, 99f) \ TEST_ARG_MEM(15, 3f+1) \ TEST_ARG_END("") \ "50: nop \n\t" \ "1: "code1 #reg code2" \n\t" \ " bx lr \n\t" \ ".thumb \n\t" \ "3: adr lr, 2f \n\t" \ " bx lr \n\t" \ ".arm \n\t" \ "2: nop \n\t" \ TESTCASE_END void kprobe_arm_test_cases(void) { kprobe_test_flags = 0; TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)") #define _DATA_PROCESSING_DNM(op,s,val) \ TEST_RR( op s "eq r0, r",1, VAL1,", r",2, val, "") \ TEST_RR( op s "ne r1, r",1, VAL1,", r",2, val, ", lsl #3") \ TEST_RR( op s "cs r2, r",3, VAL1,", r",2, val, ", lsr #4") \ TEST_RR( op s "cc r3, r",3, VAL1,", r",2, val, ", asr #5") \ TEST_RR( op s "mi r4, r",5, VAL1,", r",2, N(val),", asr #6") \ TEST_RR( op s "pl r5, r",5, VAL1,", r",2, val, ", ror #7") \ TEST_RR( op s "vs r6, r",7, VAL1,", r",2, val, ", rrx") \ TEST_R( op s "vc r6, r",7, VAL1,", pc, lsl #3") \ TEST_R( op s "vc r6, r",7, VAL1,", sp, lsr #4") \ TEST_R( op s "vc r6, pc, r",7, VAL1,", asr #5") \ TEST_R( op s "vc r6, sp, r",7, VAL1,", ror #6") \ TEST_RRR( op s "hi r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\ TEST_RRR( op s "ls r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\ TEST_RRR( op s "ge r10, r",11,VAL1,", r",14,val, ", asr r",7, 
5,"")\ TEST_RRR( op s "lt r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ TEST_RR( op s "gt r12, r13" ", r",14,val, ", ror r",14,7,"")\ TEST_RR( op s "le r14, r",0, val, ", r13" ", lsl r",14,8,"")\ TEST_R( op s "eq r0, r",11,VAL1,", #0xf5") \ TEST_R( op s "ne r11, r",0, VAL1,", #0xf5000000") \ TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \ TEST( op s " r4, pc" ", #0x00005a00") #define DATA_PROCESSING_DNM(op,val) \ _DATA_PROCESSING_DNM(op,"",val) \ _DATA_PROCESSING_DNM(op,"s",val) #define DATA_PROCESSING_NM(op,val) \ TEST_RR( op "ne r",1, VAL1,", r",2, val, "") \ TEST_RR( op "eq r",1, VAL1,", r",2, val, ", lsl #3") \ TEST_RR( op "cc r",3, VAL1,", r",2, val, ", lsr #4") \ TEST_RR( op "cs r",3, VAL1,", r",2, val, ", asr #5") \ TEST_RR( op "pl r",5, VAL1,", r",2, N(val),", asr #6") \ TEST_RR( op "mi r",5, VAL1,", r",2, val, ", ror #7") \ TEST_RR( op "vc r",7, VAL1,", r",2, val, ", rrx") \ TEST_R ( op "vs r",7, VAL1,", pc, lsl #3") \ TEST_R ( op "vs r",7, VAL1,", sp, lsr #4") \ TEST_R( op "vs pc, r",7, VAL1,", asr #5") \ TEST_R( op "vs sp, r",7, VAL1,", ror #6") \ TEST_RRR( op "ls r",9, VAL1,", r",14,val, ", lsl r",0, 3,"") \ TEST_RRR( op "hi r",9, VAL1,", r",14,val, ", lsr r",7, 4,"") \ TEST_RRR( op "lt r",11,VAL1,", r",14,val, ", asr r",7, 5,"") \ TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \ TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \ TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \ TEST_R( op "eq r",11,VAL1,", #0xf5") \ TEST_R( op "ne r",0, VAL1,", #0xf5000000") \ TEST_R( op " r",8, VAL2,", #0x000af000") #define _DATA_PROCESSING_DM(op,s,val) \ TEST_R( op s "eq r0, r",1, val, "") \ TEST_R( op s "ne r1, r",1, val, ", lsl #3") \ TEST_R( op s "cs r2, r",3, val, ", lsr #4") \ TEST_R( op s "cc r3, r",3, val, ", asr #5") \ TEST_R( op s "mi r4, r",5, N(val),", asr #6") \ TEST_R( op s "pl r5, r",5, val, ", ror #7") \ TEST_R( op s "vs r6, r",10,val, ", rrx") \ TEST( op s "vs r7, pc, lsl #3") \ TEST( op s "vs r7, sp, lsr #4") \ 
TEST_RR( op s "vc r8, r",7, val, ", lsl r",0, 3,"") \ TEST_RR( op s "hi r9, r",9, val, ", lsr r",7, 4,"") \ TEST_RR( op s "ls r10, r",9, val, ", asr r",7, 5,"") \ TEST_RR( op s "ge r11, r",11,N(val),", asr r",7, 6,"") \ TEST_RR( op s "lt r12, r",11,val, ", ror r",14,7,"") \ TEST_R( op s "gt r14, r13" ", lsl r",14,8,"") \ TEST( op s "eq r0, #0xf5") \ TEST( op s "ne r11, #0xf5000000") \ TEST( op s " r7, #0x000af000") \ TEST( op s " r4, #0x00005a00") #define DATA_PROCESSING_DM(op,val) \ _DATA_PROCESSING_DM(op,"",val) \ _DATA_PROCESSING_DM(op,"s",val) DATA_PROCESSING_DNM("and",0xf00f00ff) DATA_PROCESSING_DNM("eor",0xf00f00ff) DATA_PROCESSING_DNM("sub",VAL2) DATA_PROCESSING_DNM("rsb",VAL2) DATA_PROCESSING_DNM("add",VAL2) DATA_PROCESSING_DNM("adc",VAL2) DATA_PROCESSING_DNM("sbc",VAL2) DATA_PROCESSING_DNM("rsc",VAL2) DATA_PROCESSING_NM("tst",0xf00f00ff) DATA_PROCESSING_NM("teq",0xf00f00ff) DATA_PROCESSING_NM("cmp",VAL2) DATA_PROCESSING_NM("cmn",VAL2) DATA_PROCESSING_DNM("orr",0xf00f00ff) DATA_PROCESSING_DM("mov",VAL2) DATA_PROCESSING_DNM("bic",0xf00f00ff) DATA_PROCESSING_DM("mvn",VAL2) TEST("mov ip, sp") /* This has special case emulation code */ TEST_SUPPORTED("mov pc, #0x1000"); TEST_SUPPORTED("mov sp, #0x1000"); TEST_SUPPORTED("cmp pc, #0x1000"); TEST_SUPPORTED("cmp sp, #0x1000"); /* Data-processing with PC and a shift count in a register */ TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) " @ cmp r12, r14, asl pc") TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) " @ mov r12, r14, asl pc") TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) " @ add r10, r12, r14, asl pc") TEST_UNSUPPORTED(__inst_arm(0xe151021f) " @ cmp r1, pc, lsl r2") TEST_UNSUPPORTED(__inst_arm(0xe17f0211) " @ cmn pc, r1, lsl r2") TEST_UNSUPPORTED(__inst_arm(0xe1a0121f) " @ mov r1, pc, lsl r2") TEST_UNSUPPORTED(__inst_arm(0xe1a0f211) " @ mov pc, r1, lsl r2") TEST_UNSUPPORTED(__inst_arm(0xe042131f) " @ sub r1, r2, pc, lsl r3") TEST_UNSUPPORTED(__inst_arm(0xe1cf1312) " @ bic r1, pc, r2, lsl r3") 
TEST_UNSUPPORTED(__inst_arm(0xe081f312) " @ add pc, r1, r2, lsl r3") /* Data-processing with PC as a target and status registers updated */ TEST_UNSUPPORTED("movs pc, r1") TEST_UNSUPPORTED(__inst_arm(0xe1b0f211) " @movs pc, r1, lsl r2") TEST_UNSUPPORTED("movs pc, #0x10000") TEST_UNSUPPORTED("adds pc, lr, r1") TEST_UNSUPPORTED(__inst_arm(0xe09ef211) " @adds pc, lr, r1, lsl r2") TEST_UNSUPPORTED("adds pc, lr, #4") /* Data-processing with SP as target */ TEST("add sp, sp, #16") TEST("sub sp, sp, #8") TEST("bic sp, sp, #0x20") TEST("orr sp, sp, #0x20") TEST_PR( "add sp, r",10,0,", r",11,4,"") TEST_PRR("add sp, r",10,0,", r",11,4,", asl r",12,1,"") TEST_P( "mov sp, r",10,0,"") TEST_PR( "mov sp, r",10,0,", asl r",12,0,"") /* Data-processing with PC as target */ TEST_BF( "add pc, pc, #2f-1b-8") TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"") TEST_BF_R ("add pc, r",14,2f-1f-8,", pc") TEST_BF_R ("mov pc, r",0,2f,"") TEST_BF_R ("add pc, pc, r",14,(2f-1f-8)*2,", asr #1") TEST_BB( "sub pc, pc, #1b-2b+8") #if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7) TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */ #endif TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"") TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc") TEST_R( "add pc, pc, r",10,-2,", asl #1") #ifdef CONFIG_THUMB2_KERNEL TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"") TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8") #endif TEST_GROUP("Miscellaneous instructions") TEST_RMASKED("mrs r",0,~PSR_IGNORE_BITS,", cpsr") TEST_RMASKED("mrspl r",7,~PSR_IGNORE_BITS,", cpsr") TEST_RMASKED("mrs r",14,~PSR_IGNORE_BITS,", cpsr") TEST_UNSUPPORTED(__inst_arm(0xe10ff000) " @ mrs r15, cpsr") TEST_UNSUPPORTED("mrs r0, spsr") TEST_UNSUPPORTED("mrs lr, spsr") TEST_UNSUPPORTED("msr cpsr, r0") TEST_UNSUPPORTED("msr cpsr_f, lr") TEST_UNSUPPORTED("msr spsr, r0") #if __LINUX_ARM_ARCH__ >= 5 || \ (__LINUX_ARM_ARCH__ == 4 && !defined(CONFIG_CPU_32v4)) TEST_BF_R("bx r",0,2f,"") TEST_BB_R("bx r",7,2f,"") 
TEST_BF_R("bxeq r",14,2f,"") #endif #if __LINUX_ARM_ARCH__ >= 5 TEST_R("clz r0, r",0, 0x0,"") TEST_R("clzeq r7, r",14,0x1,"") TEST_R("clz lr, r",7, 0xffffffff,"") TEST( "clz r4, sp") TEST_UNSUPPORTED(__inst_arm(0x016fff10) " @ clz pc, r0") TEST_UNSUPPORTED(__inst_arm(0x016f0f1f) " @ clz r0, pc") #if __LINUX_ARM_ARCH__ >= 6 TEST_UNSUPPORTED("bxj r0") #endif TEST_BF_R("blx r",0,2f,"") TEST_BB_R("blx r",7,2f,"") TEST_BF_R("blxeq r",14,2f,"") TEST_UNSUPPORTED(__inst_arm(0x0120003f) " @ blx pc") TEST_RR( "qadd r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "qaddvs lr, r",9, VAL2,", r",8, VAL1,"") TEST_R( "qadd lr, r",9, VAL2,", r13") TEST_RR( "qsub r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "qsubvs lr, r",9, VAL2,", r",8, VAL1,"") TEST_R( "qsub lr, r",9, VAL2,", r13") TEST_RR( "qdadd r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "qdaddvs lr, r",9, VAL2,", r",8, VAL1,"") TEST_R( "qdadd lr, r",9, VAL2,", r13") TEST_RR( "qdsub r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "qdsubvs lr, r",9, VAL2,", r",8, VAL1,"") TEST_R( "qdsub lr, r",9, VAL2,", r13") TEST_UNSUPPORTED(__inst_arm(0xe101f050) " @ qadd pc, r0, r1") TEST_UNSUPPORTED(__inst_arm(0xe121f050) " @ qsub pc, r0, r1") TEST_UNSUPPORTED(__inst_arm(0xe141f050) " @ qdadd pc, r0, r1") TEST_UNSUPPORTED(__inst_arm(0xe161f050) " @ qdsub pc, r0, r1") TEST_UNSUPPORTED(__inst_arm(0xe16f2050) " @ qdsub r2, r0, pc") TEST_UNSUPPORTED(__inst_arm(0xe161205f) " @ qdsub r2, pc, r1") TEST_UNSUPPORTED("bkpt 0xffff") TEST_UNSUPPORTED("bkpt 0x0000") TEST_UNSUPPORTED(__inst_arm(0xe1600070) " @ smc #0") TEST_GROUP("Halfword multiply and multiply-accumulate") TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlabbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smlabb lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe10f3281) " @ smlabb pc, r1, r2, r3") TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlatbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( 
"smlatb lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe10f32a1) " @ smlatb pc, r1, r2, r3") TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlabtge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smlabt lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe10f32c1) " @ smlabt pc, r1, r2, r3") TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlattge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smlatt lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe10f32e1) " @ smlatt pc, r1, r2, r3") TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlawbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smlawb lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe12f3281) " @ smlawb pc, r1, r2, r3") TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "smlawtge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "smlawt lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe12f32c1) " @ smlawt pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe12032cf) " @ smlawt r0, pc, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe1203fc1) " @ smlawt r0, r1, pc, r3") TEST_UNSUPPORTED(__inst_arm(0xe120f2c1) " @ smlawt r0, r1, r2, pc") TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulwbge r7, r",8, VAL3,", r",9, VAL1,"") TEST_R( "smulwb lr, r",1, VAL2,", r13") TEST_UNSUPPORTED(__inst_arm(0xe12f02a1) " @ smulwb pc, r1, r2") TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulwtge r7, r",8, VAL3,", r",9, VAL1,"") TEST_R( "smulwt lr, r",1, VAL2,", r13") TEST_UNSUPPORTED(__inst_arm(0xe12f02e1) " @ smulwt pc, r1, r2") TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbble r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlalbb r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") 
TEST_UNSUPPORTED(__inst_arm(0xe14f1382) " @ smlalbb pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe141f382) " @ smlalbb r1, pc, r2, r3") TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlaltble r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlaltb r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe14f13a2) " @ smlaltb pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe141f3a2) " @ smlaltb r1, pc, r2, r3") TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalbtle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlalbt r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe14f13c2) " @ smlalbt pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe141f3c2) " @ smlalbt r1, pc, r2, r3") TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalttle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlaltt r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe14f13e2) " @ smlalbb pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe140f3e2) " @ smlalbb r0, pc, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe14013ef) " @ smlalbb r0, r1, pc, r3") TEST_UNSUPPORTED(__inst_arm(0xe1401fe2) " @ smlalbb r0, r1, r2, pc") TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulbbge r7, r",8, VAL3,", r",9, VAL1,"") TEST_R( "smulbb lr, r",1, VAL2,", r13") TEST_UNSUPPORTED(__inst_arm(0xe16f0281) " @ smulbb pc, r1, r2") TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smultbge r7, r",8, VAL3,", r",9, VAL1,"") TEST_R( "smultb lr, r",1, VAL2,", r13") TEST_UNSUPPORTED(__inst_arm(0xe16f02a1) " @ smultb pc, r1, r2") TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "smulbtge r7, r",8, VAL3,", r",9, VAL1,"") TEST_R( "smulbt lr, r",1, VAL2,", r13") TEST_UNSUPPORTED(__inst_arm(0xe16f02c1) " @ smultb pc, r1, r2") TEST_RR( "smultt r0, r",1, VAL1,", r",2, 
VAL2,"") TEST_RR( "smulttge r7, r",8, VAL3,", r",9, VAL1,"") TEST_R( "smultt lr, r",1, VAL2,", r13") TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2") TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2") TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc") #endif TEST_GROUP("Multiply and multiply-accumulate") TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "mulls r7, r",8, VAL2,", r",9, VAL2,"") TEST_R( "mul lr, r",4, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe00f0291) " @ mul pc, r1, r2") TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2") TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc") TEST_RR( "muls r0, r",1, VAL1,", r",2, VAL2,"") TEST_RR( "mulsls r7, r",8, VAL2,", r",9, VAL2,"") TEST_R( "muls lr, r",4, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2") TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mlahi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "mla lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3") TEST_RRR( "mlas r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mlashi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "mlas lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3") #if __LINUX_ARM_ARCH__ >= 6 TEST_RR( "umaal r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "umaalls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "umaal lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe041f392) " @ umaal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe04f0392) " @ umaal r0, pc, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0500090) " @ undef") TEST_UNSUPPORTED(__inst_arm(0xe05fff9f) " @ undef") #endif #if __LINUX_ARM_ARCH__ >= 7 TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13") 
TEST_UNSUPPORTED(__inst_arm(0xe06f3291) " @ mls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe060329f) " @ mls r0, pc, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0603f91) " @ mls r0, r1, pc, r3") TEST_UNSUPPORTED(__inst_arm(0xe060f291) " @ mls r0, r1, r2, pc") #endif TEST_UNSUPPORTED(__inst_arm(0xe0700090) " @ undef") TEST_UNSUPPORTED(__inst_arm(0xe07fff9f) " @ undef") TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "umullls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "umull lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3") TEST_RR( "umulls r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "umullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "umulls lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3") TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umlalle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "umlal r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3") TEST_RRRR( "umlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "umlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "umlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3") TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( "smullls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "smull lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3") TEST_RR( "smulls r0, r1, r",2, VAL1,", r",3, VAL2,"") TEST_RR( 
"smullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "smulls lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3") TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlal r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3") TEST_RRRR( "smlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) TEST_RRRR( "smlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0f0139f) " @ smlals r0, r1, pc, r3") TEST_UNSUPPORTED(__inst_arm(0xe0f01f92) " @ smlals r0, r1, r2, pc") TEST_GROUP("Synchronization primitives") #if __LINUX_ARM_ARCH__ < 6 TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]") TEST_R( "swpvs r0, r",1,VAL1,", [sp]") TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]") #else TEST_UNSUPPORTED(__inst_arm(0xe108e097) " @ swp lr, r7, [r8]") TEST_UNSUPPORTED(__inst_arm(0x610d0091) " @ swpvs r0, r1, [sp]") TEST_UNSUPPORTED(__inst_arm(0xe10cd09e) " @ swp sp, r14 [r12]") #endif TEST_UNSUPPORTED(__inst_arm(0xe102f091) " @ swp pc, r1, [r2]") TEST_UNSUPPORTED(__inst_arm(0xe102009f) " @ swp r0, pc, [r2]") TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]") #if __LINUX_ARM_ARCH__ < 6 TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]") TEST_R( "swpbvs r0, r",1,VAL1,", [sp]") #else TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb lr, r7, [r8]") TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb r0, r1, [sp]") #endif TEST_UNSUPPORTED(__inst_arm(0xe142f091) " @ swpb pc, r1, [r2]") 
TEST_UNSUPPORTED(__inst_arm(0xe1100090)) /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe1200090)) /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe1300090)) /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe1500090)) /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe1600090)) /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe1700090)) /* Unallocated space */ #if __LINUX_ARM_ARCH__ >= 6 TEST_UNSUPPORTED("ldrex r2, [sp]") #endif #if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K) TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]") TEST_UNSUPPORTED("ldrexd r2, r3, [sp]") TEST_UNSUPPORTED("strexb r0, r2, [sp]") TEST_UNSUPPORTED("ldrexb r2, [sp]") TEST_UNSUPPORTED("strexh r0, r2, [sp]") TEST_UNSUPPORTED("ldrexh r2, [sp]") #endif TEST_GROUP("Extra load/store instructions") TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") TEST_RPR( "strheq r",14,VAL2,", [r",11,0, ", r",12, 48,"]") TEST_UNSUPPORTED( "strheq r14, [r13, r12]") TEST_UNSUPPORTED( "strheq r14, [r12, r13]") TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") TEST_RPR( "strhne r",12,VAL2,", [r",11,48,", -r",10,24,"]!") TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"") TEST_RPR( "strh r",10,VAL2,", [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) " @ strh r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe089f0bb) " @ strh pc, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) " @ strh r10, [r9], pc") TEST_PR( "ldrh r0, [r",0, 48,", -r",2, 24,"]") TEST_PR( "ldrhcs r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrh r1, [r",2, 24,", r",3, 48,"]!") TEST_PR( "ldrhcc r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) " @ ldrh r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0bb) " @ ldrh pc, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) " @ ldrh r10, [r9], pc") TEST_RP( "strh r",0, VAL1,", [r",1, 24,", #-2]") TEST_RP( "strhmi 
r",14,VAL2,", [r",13,0, ", #2]") TEST_RP( "strh r",1, VAL1,", [r",2, 24,", #4]!") TEST_RP( "strhpl r",12,VAL2,", [r",11,24,", #-4]!") TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48") TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strh r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") TEST_UNSUPPORTED("strh r3, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") TEST_RP( "strh r",4, VAL1,", [r",14,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") TEST_UNSUPPORTED(__inst_arm(0xe1efc3b0) " @ strh r12, [pc, #48]!") TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) " @ strh pc, [r9], #48") TEST_P( "ldrh r0, [r",0, 24,", #-2]") TEST_P( "ldrhvs r14, [r",13,0, ", #2]") TEST_P( "ldrh r1, [r",2, 24,", #4]!") TEST_P( "ldrhvc r12, [r",11,24,", #-4]!") TEST_P( "ldrh r2, [r",3, 24,"], #48") TEST_P( "ldrh r10, [r",9, 64,"], #-48") TEST( "ldrh r0, [pc, #0]") TEST_UNSUPPORTED(__inst_arm(0xe1ffc3b0) " @ ldrh r12, [pc, #48]!") TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) " @ ldrh pc, [r9], #48") TEST_PR( "ldrsb r0, [r",0, 48,", -r",2, 24,"]") TEST_PR( "ldrsbhi r14, [r",13,0,", r",12, 48,"]") TEST_PR( "ldrsb r1, [r",2, 24,", r",3, 48,"]!") TEST_PR( "ldrsbls r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsb r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsb r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) " @ ldrsb r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0db) " @ ldrsb pc, [r9], r11") TEST_P( "ldrsb r0, [r",0, 24,", #-1]") TEST_P( "ldrsbge r14, [r",13,0, ", #1]") TEST_P( "ldrsb r1, [r",2, 24,", #4]!") TEST_P( "ldrsblt r12, [r",11,24,", #-4]!") TEST_P( "ldrsb r2, [r",3, 24,"], #48") TEST_P( "ldrsb r10, [r",9, 64,"], #-48") TEST( "ldrsb r0, [pc, #0]") TEST_UNSUPPORTED(__inst_arm(0xe1ffc3d0) " @ ldrsb r12, [pc, #48]!") TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) " @ ldrsb pc, [r9], #48") TEST_PR( "ldrsh r0, [r",0, 48,", -r",2, 24,"]") TEST_PR( "ldrshgt r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrsh r1, [r",2, 24,", r",3, 48,"]!") 
TEST_PR( "ldrshle r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) " @ ldrsh r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) " @ ldrsh pc, [r9], r11") TEST_P( "ldrsh r0, [r",0, 24,", #-1]") TEST_P( "ldrsheq r14, [r",13,0 ,", #1]") TEST_P( "ldrsh r1, [r",2, 24,", #4]!") TEST_P( "ldrshne r12, [r",11,24,", #-4]!") TEST_P( "ldrsh r2, [r",3, 24,"], #48") TEST_P( "ldrsh r10, [r",9, 64,"], #-48") TEST( "ldrsh r0, [pc, #0]") TEST_UNSUPPORTED(__inst_arm(0xe1ffc3f0) " @ ldrsh r12, [pc, #48]!") TEST_UNSUPPORTED(__inst_arm(0xe0d9f3f0) " @ ldrsh pc, [r9], #48") #if __LINUX_ARM_ARCH__ >= 7 TEST_UNSUPPORTED("strht r1, [r2], r3") TEST_UNSUPPORTED("ldrht r1, [r2], r3") TEST_UNSUPPORTED("strht r1, [r2], #48") TEST_UNSUPPORTED("ldrht r1, [r2], #48") TEST_UNSUPPORTED("ldrsbt r1, [r2], r3") TEST_UNSUPPORTED("ldrsbt r1, [r2], #48") TEST_UNSUPPORTED("ldrsht r1, [r2], r3") TEST_UNSUPPORTED("ldrsht r1, [r2], #48") #endif #if __LINUX_ARM_ARCH__ >= 5 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]") TEST_RPR( "strdcc r",8, VAL2,", [r",11,0, ", r",12,48,"]") TEST_UNSUPPORTED( "strdcc r8, [r13, r12]") TEST_UNSUPPORTED( "strdcc r8, [r12, r13]") TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") TEST_RPR( "strdcs r",12,VAL2,", r13, [r",11,48,", -r",10,24,"]!") TEST_RPR( "strd r",2, VAL1,", r3, [r",5, 24,"], r",4,48,"") TEST_RPR( "strd r",10,VAL2,", r11, [r",9, 48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) " @ strd r12, [pc, r10]!") TEST_PR( "ldrd r0, [r",0, 48,", -r",2,24,"]") TEST_PR( "ldrdmi r8, [r",13,0, ", r",12,48,"]") TEST_PR( "ldrd r4, [r",2, 24,", r",3, 48,"]!") TEST_PR( "ldrdpl r6, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrd r2, r3, [r",5, 24,"], r",4,48,"") TEST_PR( "ldrd r10, r11, [r",9,48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) " @ ldrd r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe089f0db) " @ ldrd pc, 
[r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089e0db) " @ ldrd lr, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089c0df) " @ ldrd r12, [r9], pc") TEST_RP( "strd r",0, VAL1,", [r",1, 24,", #-8]") TEST_RP( "strdvs r",8, VAL2,", [r",13,0, ", #8]") TEST_RP( "strd r",4, VAL1,", [r",2, 24,", #16]!") TEST_RP( "strdvc r",12,VAL2,", r13, [r",11,24,", #-16]!") TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48") TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strd r",6, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") TEST_UNSUPPORTED("strd r6, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") TEST_RP( "strd r",4, VAL1,", [r",12,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) " @ strd r12, [pc, #48]!") TEST_P( "ldrd r0, [r",0, 24,", #-8]") TEST_P( "ldrdhi r8, [r",13,0, ", #8]") TEST_P( "ldrd r4, [r",2, 24,", #16]!") TEST_P( "ldrdls r6, [r",11,24,", #-16]!") TEST_P( "ldrd r2, [r",5, 24,"], #48") TEST_P( "ldrd r10, [r",9,6,"], #-48") TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!") TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) " @ ldrd pc, [r9], #48") TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) " @ ldrd lr, [r9], #48") #endif TEST_GROUP("Miscellaneous") #if __LINUX_ARM_ARCH__ >= 7 TEST("movw r0, #0") TEST("movw r0, #0xffff") TEST("movw lr, #0xffff") TEST_UNSUPPORTED(__inst_arm(0xe300f000) " @ movw pc, #0") TEST_R("movt r",0, VAL1,", #0") TEST_R("movt r",0, VAL2,", #0xffff") TEST_R("movt r",14,VAL1,", #0xffff") TEST_UNSUPPORTED(__inst_arm(0xe340f000) " @ movt pc, #0") #endif TEST_UNSUPPORTED("msr cpsr, 0x13") TEST_UNSUPPORTED("msr cpsr_f, 0xf0000000") TEST_UNSUPPORTED("msr spsr, 0x13") #if __LINUX_ARM_ARCH__ >= 7 TEST_SUPPORTED("yield") TEST("sev") TEST("nop") TEST("wfi") TEST_SUPPORTED("wfe") TEST_UNSUPPORTED("dbg #0") #endif TEST_GROUP("Load/store word and unsigned byte") #define LOAD_STORE(byte) \ TEST_RP( "str"byte" r",0, VAL1,", [r",1, 24,", #-2]") \ TEST_RP( "str"byte" r",14,VAL2,", [r",13,0, ", 
#2]") \ TEST_RP( "str"byte" r",1, VAL1,", [r",2, 24,", #4]!") \ TEST_RP( "str"byte" r",12,VAL2,", [r",11,24,", #-4]!") \ TEST_RP( "str"byte" r",2, VAL1,", [r",3, 24,"], #48") \ TEST_RP( "str"byte" r",10,VAL2,", [r",9, 64,"], #-48") \ TEST_RP( "str"byte" r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") \ TEST_UNSUPPORTED("str"byte" r3, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") \ TEST_RP( "str"byte" r",4, VAL1,", [r",10,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") \ TEST_RPR("str"byte" r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") \ TEST_RPR("str"byte" r",14,VAL2,", [r",11,0, ", r",12, 48,"]") \ TEST_UNSUPPORTED("str"byte" r14, [r13, r12]") \ TEST_UNSUPPORTED("str"byte" r14, [r12, r13]") \ TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") \ TEST_RPR("str"byte" r",12,VAL2,", [r",11,48,", -r",10,24,"]!") \ TEST_RPR("str"byte" r",2, VAL1,", [r",3, 24,"], r",4, 48,"") \ TEST_RPR("str"byte" r",10,VAL2,", [r",9, 48,"], -r",11,24,"") \ TEST_RPR("str"byte" r",0, VAL1,", [r",1, 24,", r",2, 32,", asl #1]")\ TEST_RPR("str"byte" r",14,VAL2,", [r",11,0, ", r",12, 32,", lsr #2]")\ TEST_UNSUPPORTED("str"byte" r14, [r13, r12, lsr #2]") \ TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 32,", asr #3]!")\ TEST_RPR("str"byte" r",12,VAL2,", [r",11,24,", r",10, 4,", ror #31]!")\ TEST_P( "ldr"byte" r0, [r",0, 24,", #-2]") \ TEST_P( "ldr"byte" r14, [r",13,0, ", #2]") \ TEST_P( "ldr"byte" r1, [r",2, 24,", #4]!") \ TEST_P( "ldr"byte" r12, [r",11,24,", #-4]!") \ TEST_P( "ldr"byte" r2, [r",3, 24,"], #48") \ TEST_P( "ldr"byte" r10, [r",9, 64,"], #-48") \ TEST_PR( "ldr"byte" r0, [r",0, 48,", -r",2, 24,"]") \ TEST_PR( "ldr"byte" r14, [r",13,0, ", r",12, 48,"]") \ TEST_PR( "ldr"byte" r1, [r",2, 24,", r",3, 48,"]!") \ TEST_PR( "ldr"byte" r12, [r",11,48,", -r",10,24,"]!") \ TEST_PR( "ldr"byte" r2, [r",3, 24,"], r",4, 48,"") \ TEST_PR( "ldr"byte" r10, [r",9, 48,"], -r",11,24,"") \ TEST_PR( "ldr"byte" r0, [r",0, 24,", r",2, 32,", asl #1]") \ 
TEST_PR( "ldr"byte" r14, [r",13,0, ", r",12, 32,", lsr #2]") \ TEST_PR( "ldr"byte" r1, [r",2, 24,", r",3, 32,", asr #3]!") \ TEST_PR( "ldr"byte" r12, [r",11,24,", r",10, 4,", ror #31]!") \ TEST( "ldr"byte" r0, [pc, #0]") \ TEST_R( "ldr"byte" r12, [pc, r",14,0,"]") LOAD_STORE("") TEST_P( "str pc, [r",0,0,", #15*4]") TEST_UNSUPPORTED( "str pc, [sp, r2]") TEST_BF( "ldr pc, [sp, #15*4]") TEST_BF_R("ldr pc, [sp, r",2,15*4,"]") TEST_P( "str sp, [r",0,0,", #13*4]") TEST_UNSUPPORTED( "str sp, [sp, r2]") TEST_BF( "ldr sp, [sp, #13*4]") TEST_BF_R("ldr sp, [sp, r",2,13*4,"]") #ifdef CONFIG_THUMB2_KERNEL TEST_ARM_TO_THUMB_INTERWORK_P("ldr pc, [r",0,0,", #15*4]") #endif TEST_UNSUPPORTED(__inst_arm(0xe5af6008) " @ str r6, [pc, #8]!") TEST_UNSUPPORTED(__inst_arm(0xe7af6008) " @ str r6, [pc, r8]!") TEST_UNSUPPORTED(__inst_arm(0xe5bf6008) " @ ldr r6, [pc, #8]!") TEST_UNSUPPORTED(__inst_arm(0xe7bf6008) " @ ldr r6, [pc, r8]!") TEST_UNSUPPORTED(__inst_arm(0xe788600f) " @ str r6, [r8, pc]") TEST_UNSUPPORTED(__inst_arm(0xe798600f) " @ ldr r6, [r8, pc]") LOAD_STORE("b") TEST_UNSUPPORTED(__inst_arm(0xe5f7f008) " @ ldrb pc, [r7, #8]!") TEST_UNSUPPORTED(__inst_arm(0xe7f7f008) " @ ldrb pc, [r7, r8]!") TEST_UNSUPPORTED(__inst_arm(0xe5ef6008) " @ strb r6, [pc, #8]!") TEST_UNSUPPORTED(__inst_arm(0xe7ef6008) " @ strb r6, [pc, r3]!") TEST_UNSUPPORTED(__inst_arm(0xe5ff6008) " @ ldrb r6, [pc, #8]!") TEST_UNSUPPORTED(__inst_arm(0xe7ff6008) " @ ldrb r6, [pc, r3]!") TEST_UNSUPPORTED("ldrt r0, [r1], #4") TEST_UNSUPPORTED("ldrt r1, [r2], r3") TEST_UNSUPPORTED("strt r2, [r3], #4") TEST_UNSUPPORTED("strt r3, [r4], r5") TEST_UNSUPPORTED("ldrbt r4, [r5], #4") TEST_UNSUPPORTED("ldrbt r5, [r6], r7") TEST_UNSUPPORTED("strbt r6, [r7], #4") TEST_UNSUPPORTED("strbt r7, [r8], r9") #if __LINUX_ARM_ARCH__ >= 7 TEST_GROUP("Parallel addition and subtraction, signed") TEST_UNSUPPORTED(__inst_arm(0xe6000010) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe60fffff) "") /* Unallocated space */ TEST_RR( "sadd16 
r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sadd16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe61cff1a) " @ sadd16 pc, r12, r10") TEST_RR( "sasx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sasx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe61cff3a) " @ sasx pc, r12, r10") TEST_RR( "ssax r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "ssax r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe61cff5a) " @ ssax pc, r12, r10") TEST_RR( "ssub16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "ssub16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe61cff7a) " @ ssub16 pc, r12, r10") TEST_RR( "sadd8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sadd8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe61cff9a) " @ sadd8 pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe61000b0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe61fffbf) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe61000d0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe61fffdf) "") /* Unallocated space */ TEST_RR( "ssub8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "ssub8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe61cfffa) " @ ssub8 pc, r12, r10") TEST_RR( "qadd16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "qadd16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe62cff1a) " @ qadd16 pc, r12, r10") TEST_RR( "qasx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "qasx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe62cff3a) " @ qasx pc, r12, r10") TEST_RR( "qsax r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "qsax r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe62cff5a) " @ qsax pc, r12, r10") TEST_RR( "qsub16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "qsub16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe62cff7a) " @ qsub16 pc, r12, r10") TEST_RR( "qadd8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "qadd8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe62cff9a) " @ qadd8 
pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe62000b0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe62fffbf) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe62000d0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe62fffdf) "") /* Unallocated space */ TEST_RR( "qsub8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "qsub8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe62cfffa) " @ qsub8 pc, r12, r10") TEST_RR( "shadd16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "shadd16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe63cff1a) " @ shadd16 pc, r12, r10") TEST_RR( "shasx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "shasx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe63cff3a) " @ shasx pc, r12, r10") TEST_RR( "shsax r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "shsax r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe63cff5a) " @ shsax pc, r12, r10") TEST_RR( "shsub16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "shsub16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe63cff7a) " @ shsub16 pc, r12, r10") TEST_RR( "shadd8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "shadd8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe63cff9a) " @ shadd8 pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe63000b0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe63fffbf) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe63000d0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe63fffdf) "") /* Unallocated space */ TEST_RR( "shsub8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "shsub8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe63cfffa) " @ shsub8 pc, r12, r10") TEST_GROUP("Parallel addition and subtraction, unsigned") TEST_UNSUPPORTED(__inst_arm(0xe6400010) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe64fffff) "") /* Unallocated space */ TEST_RR( "uadd16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uadd16 r14, r",12,HH2,", r",10,HH1,"") 
TEST_UNSUPPORTED(__inst_arm(0xe65cff1a) " @ uadd16 pc, r12, r10") TEST_RR( "uasx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uasx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe65cff3a) " @ uasx pc, r12, r10") TEST_RR( "usax r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "usax r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe65cff5a) " @ usax pc, r12, r10") TEST_RR( "usub16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "usub16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe65cff7a) " @ usub16 pc, r12, r10") TEST_RR( "uadd8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uadd8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe65cff9a) " @ uadd8 pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe65000b0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe65fffbf) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe65000d0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe65fffdf) "") /* Unallocated space */ TEST_RR( "usub8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "usub8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe65cfffa) " @ usub8 pc, r12, r10") TEST_RR( "uqadd16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uqadd16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe66cff1a) " @ uqadd16 pc, r12, r10") TEST_RR( "uqasx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uqasx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe66cff3a) " @ uqasx pc, r12, r10") TEST_RR( "uqsax r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uqsax r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe66cff5a) " @ uqsax pc, r12, r10") TEST_RR( "uqsub16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uqsub16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe66cff7a) " @ uqsub16 pc, r12, r10") TEST_RR( "uqadd8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uqadd8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe66cff9a) " @ uqadd8 pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe66000b0) "") /* 
Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe66fffbf) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe66000d0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe66fffdf) "") /* Unallocated space */ TEST_RR( "uqsub8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uqsub8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe66cfffa) " @ uqsub8 pc, r12, r10") TEST_RR( "uhadd16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uhadd16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe67cff1a) " @ uhadd16 pc, r12, r10") TEST_RR( "uhasx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uhasx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe67cff3a) " @ uhasx pc, r12, r10") TEST_RR( "uhsax r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uhsax r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe67cff5a) " @ uhsax pc, r12, r10") TEST_RR( "uhsub16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uhsub16 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe67cff7a) " @ uhsub16 pc, r12, r10") TEST_RR( "uhadd8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uhadd8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe67cff9a) " @ uhadd8 pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe67000b0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe67fffbf) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe67000d0) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe67fffdf) "") /* Unallocated space */ TEST_RR( "uhsub8 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uhsub8 r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe67cfffa) " @ uhsub8 pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe67feffa) " @ uhsub8 r14, pc, r10") TEST_UNSUPPORTED(__inst_arm(0xe67cefff) " @ uhsub8 r14, r12, pc") #endif /* __LINUX_ARM_ARCH__ >= 7 */ #if __LINUX_ARM_ARCH__ >= 6 TEST_GROUP("Packing, unpacking, saturation, and reversal") TEST_RR( "pkhbt r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2") 
TEST_UNSUPPORTED(__inst_arm(0xe68cf11a) " @ pkhbt pc, r12, r10, lsl #2") TEST_RR( "pkhtb r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2") TEST_UNSUPPORTED(__inst_arm(0xe68cf15a) " @ pkhtb pc, r12, r10, asr #2") TEST_UNSUPPORTED(__inst_arm(0xe68fe15a) " @ pkhtb r14, pc, r10, asr #2") TEST_UNSUPPORTED(__inst_arm(0xe68ce15f) " @ pkhtb r14, r12, pc, asr #2") TEST_UNSUPPORTED(__inst_arm(0xe6900010) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe69fffdf) "") /* Unallocated space */ TEST_R( "ssat r0, #24, r",0, VAL1,"") TEST_R( "ssat r14, #24, r",12, VAL2,"") TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8") TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8") TEST_UNSUPPORTED(__inst_arm(0xe6b7f01c) " @ ssat pc, #24, r12") TEST_R( "usat r0, #24, r",0, VAL1,"") TEST_R( "usat r14, #24, r",12, VAL2,"") TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8") TEST_R( "usat r14, #24, r",12, VAL2,", asr #8") TEST_UNSUPPORTED(__inst_arm(0xe6f7f01c) " @ usat pc, #24, r12") TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxtb16 r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe68cf47a) " @ sxtab16 pc,r12, r10, ror #8") TEST_RR( "sel r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "sel r14, r",12,VAL1,", r",10, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe68cffba) " @ sel pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe68fefba) " @ sel r14, pc, r10") TEST_UNSUPPORTED(__inst_arm(0xe68cefbf) " @ sel r14, r12, pc") TEST_R( "ssat16 r0, #12, r",0, HH1,"") TEST_R( "ssat16 r14, #12, r",12, HH2,"") TEST_UNSUPPORTED(__inst_arm(0xe6abff3c) " @ ssat16 pc, #12, r12") TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxtb r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe6acf47a) " @ sxtab pc,r12, r10, ror #8") TEST_R( "rev r0, r",0, VAL1,"") TEST_R( "rev r14, r",12, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe6bfff3c) " @ rev pc, r12") TEST_RR( "sxtah r0, 
r",0, HH1,", r",1, HH2,"") TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "sxth r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe6bcf47a) " @ sxtah pc,r12, r10, ror #8") TEST_R( "rev16 r0, r",0, VAL1,"") TEST_R( "rev16 r14, r",12, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe6bfffbc) " @ rev16 pc, r12") TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxtb16 r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe6ccf47a) " @ uxtab16 pc,r12, r10, ror #8") TEST_R( "usat16 r0, #12, r",0, HH1,"") TEST_R( "usat16 r14, #12, r",12, HH2,"") TEST_UNSUPPORTED(__inst_arm(0xe6ecff3c) " @ usat16 pc, #12, r12") TEST_UNSUPPORTED(__inst_arm(0xe6ecef3f) " @ usat16 r14, #12, pc") TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxtb r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe6ecf47a) " @ uxtab pc,r12, r10, ror #8") #if __LINUX_ARM_ARCH__ >= 7 TEST_R( "rbit r0, r",0, VAL1,"") TEST_R( "rbit r14, r",12, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) " @ rbit pc, r12") #endif TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8") TEST_R( "uxth r8, r",7, HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe6fff077) " @ uxth pc, r7") TEST_UNSUPPORTED(__inst_arm(0xe6ff807f) " @ uxth r8, pc") TEST_UNSUPPORTED(__inst_arm(0xe6fcf47a) " @ uxtah pc, r12, r10, ror #8") TEST_UNSUPPORTED(__inst_arm(0xe6fce47f) " @ uxtah r14, r12, pc, ror #8") TEST_R( "revsh r0, r",0, VAL1,"") TEST_R( "revsh r14, r",12, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) " @ revsh pc, r12") TEST_UNSUPPORTED(__inst_arm(0xe6ffef3f) " @ revsh r14, pc") TEST_UNSUPPORTED(__inst_arm(0xe6900070) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe69fff7f) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe6d00070) "") /* Unallocated space */ TEST_UNSUPPORTED(__inst_arm(0xe6dfff7f) "") /* Unallocated space */ #endif /* 
__LINUX_ARM_ARCH__ >= 6 */ #if __LINUX_ARM_ARCH__ >= 6 TEST_GROUP("Signed multiplies") TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe70f8a1c) " @ smlad pc, r12, r10, r8") TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe70f8a3c) " @ smladx pc, r12, r10, r8") TEST_RR( "smuad r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe70ffa1c) " @ smuad pc, r12, r10") TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe70ffa3c) " @ smuadx pc, r12, r10") TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe70f8a5c) " @ smlsd pc, r12, r10, r8") TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"") TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe70f8a7c) " @ smlsdx pc, r12, r10, r8") TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe70ffa5c) " @ smusd pc, r12, r10") TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"") TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"") TEST_UNSUPPORTED(__inst_arm(0xe70ffa7c) " @ smusdx pc, r12, r10") TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2) TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_UNSUPPORTED(__inst_arm(0xe74af819) " @ smlald pc, r10, r9, r8") TEST_UNSUPPORTED(__inst_arm(0xe74fb819) " @ smlald r11, pc, r9, r8") TEST_UNSUPPORTED(__inst_arm(0xe74ab81f) " @ smlald r11, r10, pc, r8") TEST_UNSUPPORTED(__inst_arm(0xe74abf19) " @ smlald r11, r10, r9, pc") TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", 
r",1, HH2) TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1) TEST_UNSUPPORTED(__inst_arm(0xe74af839) " @ smlaldx pc, r10, r9, r8") TEST_UNSUPPORTED(__inst_arm(0xe74fb839) " @ smlaldx r11, pc, r9, r8") TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe75f8a1c) " @ smmla pc, r12, r10, r8") TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe75f8a3c) " @ smmlar pc, r12, r10, r8") TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"") TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) " @ smmul pc, r12, r10") TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"") TEST_UNSUPPORTED(__inst_arm(0xe75ffa3c) " @ smmulr pc, r12, r10") TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe75f8adc) " @ smmls pc, r12, r10, r8") TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"") TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"") TEST_UNSUPPORTED(__inst_arm(0xe75f8afc) " @ smmlsr pc, r12, r10, r8") TEST_UNSUPPORTED(__inst_arm(0xe75e8aff) " @ smmlsr r14, pc, r10, r8") TEST_UNSUPPORTED(__inst_arm(0xe75e8ffc) " @ smmlsr r14, r12, pc, r8") TEST_UNSUPPORTED(__inst_arm(0xe75efafc) " @ smmlsr r14, r12, r10, pc") TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"") TEST_RR( "usad8 r14, r",12,VAL2,", r",10,VAL1,"") TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) " @ usad8 pc, r12, r10") TEST_UNSUPPORTED(__inst_arm(0xe75efa1f) " @ usad8 r14, pc, r10") TEST_UNSUPPORTED(__inst_arm(0xe75eff1c) " @ usad8 r14, r12, pc") TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"") TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"") 
TEST_UNSUPPORTED(__inst_arm(0xe78f8a1c) " @ usada8 pc, r12, r10, r8") TEST_UNSUPPORTED(__inst_arm(0xe78e8a1f) " @ usada8 r14, pc, r10, r8") TEST_UNSUPPORTED(__inst_arm(0xe78e8f1c) " @ usada8 r14, r12, pc, r8") #endif /* __LINUX_ARM_ARCH__ >= 6 */ #if __LINUX_ARM_ARCH__ >= 7 TEST_GROUP("Bit Field") TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31") TEST_R( "sbfxeq r14, r",12, VAL2,", #8, #16") TEST_R( "sbfx r4, r",10, VAL1,", #16, #15") TEST_UNSUPPORTED(__inst_arm(0xe7aff45c) " @ sbfx pc, r12, #8, #16") TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31") TEST_R( "ubfxcs r14, r",12, VAL2,", #8, #16") TEST_R( "ubfx r4, r",10, VAL1,", #16, #15") TEST_UNSUPPORTED(__inst_arm(0xe7eff45c) " @ ubfx pc, r12, #8, #16") TEST_UNSUPPORTED(__inst_arm(0xe7efc45f) " @ ubfx r12, pc, #8, #16") TEST_R( "bfc r",0, VAL1,", #4, #20") TEST_R( "bfcvs r",14,VAL2,", #4, #20") TEST_R( "bfc r",7, VAL1,", #0, #31") TEST_R( "bfc r",8, VAL2,", #0, #31") TEST_UNSUPPORTED(__inst_arm(0xe7def01f) " @ bfc pc, #0, #31"); TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31") TEST_RR( "bfipl r",12,VAL1,", r",14 , VAL2,", #4, #20") TEST_UNSUPPORTED(__inst_arm(0xe7d7f21e) " @ bfi pc, r14, #4, #20") TEST_UNSUPPORTED(__inst_arm(0x07f000f0) "") /* Permanently UNDEFINED */ TEST_UNSUPPORTED(__inst_arm(0x07ffffff) "") /* Permanently UNDEFINED */ #endif /* __LINUX_ARM_ARCH__ >= 6 */ TEST_GROUP("Branch, branch with link, and block data transfer") TEST_P( "stmda r",0, 16*4,", {r0}") TEST_P( "stmdaeq r",4, 16*4,", {r0-r15}") TEST_P( "stmdane r",8, 16*4,"!, {r8-r15}") TEST_P( "stmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmda r",13,0, "!, {pc}") TEST_P( "ldmda r",0, 16*4,", {r0}") TEST_BF_P("ldmdacs r",4, 15*4,", {r0-r15}") TEST_BF_P("ldmdacc r",7, 15*4,"!, {r8-r15}") TEST_P( "ldmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmda r",14,15*4,"!, {pc}") TEST_P( "stmia r",0, 16*4,", {r0}") TEST_P( "stmiami r",4, 16*4,", {r0-r15}") TEST_P( "stmiapl r",8, 16*4,"!, {r8-r15}") TEST_P( "stmia r",12,16*4,"!, 
{r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmia r",14,0, "!, {pc}") TEST_P( "ldmia r",0, 16*4,", {r0}") TEST_BF_P("ldmiavs r",4, 0, ", {r0-r15}") TEST_BF_P("ldmiavc r",7, 8*4, "!, {r8-r15}") TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmia r",14,15*4,"!, {pc}") TEST_P( "stmdb r",0, 16*4,", {r0}") TEST_P( "stmdbhi r",4, 16*4,", {r0-r15}") TEST_P( "stmdbls r",8, 16*4,"!, {r8-r15}") TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmdb r",13,4, "!, {pc}") TEST_P( "ldmdb r",0, 16*4,", {r0}") TEST_BF_P("ldmdbge r",4, 16*4,", {r0-r15}") TEST_BF_P("ldmdblt r",7, 16*4,"!, {r8-r15}") TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmdb r",14,16*4,"!, {pc}") TEST_P( "stmib r",0, 16*4,", {r0}") TEST_P( "stmibgt r",4, 16*4,", {r0-r15}") TEST_P( "stmible r",8, 16*4,"!, {r8-r15}") TEST_P( "stmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmib r",13,-4, "!, {pc}") TEST_P( "ldmib r",0, 16*4,", {r0}") TEST_BF_P("ldmibeq r",4, -4,", {r0-r15}") TEST_BF_P("ldmibne r",7, 7*4,"!, {r8-r15}") TEST_P( "ldmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmib r",14,14*4,"!, {pc}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}") TEST_P( "stmdbeq r",13,16*4,"!, {r3-r12}") TEST_P( "stmdbne r",2, 16*4,", {r3-r12,lr}") TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}") TEST_P( "stmdb r",0, 16*4,", {r0-r12}") TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}") TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}") TEST_P( "ldmiacc r",13,5*4, "!, {r3-r12}") TEST_BF_P("ldmiacs r",2, 5*4, "!, {r3-r12,pc}") TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}") TEST_P( "ldmia r",0, 16*4,", {r0-r12}") TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}") #ifdef CONFIG_THUMB2_KERNEL TEST_ARM_TO_THUMB_INTERWORK_P("ldmplia r",0,15*4,", {pc}") TEST_ARM_TO_THUMB_INTERWORK_P("ldmmiia r",13,0,", {r0-r15}") #endif TEST_BF("b 2f") TEST_BF("bl 2f") TEST_BB("b 2b") TEST_BB("bl 2b") TEST_BF("beq 2f") TEST_BF("bleq 2f") TEST_BB("bne 2b") TEST_BB("blne 2b") TEST_BF("bgt 2f") TEST_BF("blgt 
2f") TEST_BB("blt 2b") TEST_BB("bllt 2b") TEST_GROUP("Supervisor Call, and coprocessor instructions") /* * We can't really test these by executing them, so all * we can do is check that probes are, or are not allowed. * At the moment none are allowed... */ #define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code) #define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc) \ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]") \ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]") \ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]!") \ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]!") \ TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #4") \ TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #-4") \ TEST_COPROCESSOR("stc"two" p0, cr0, [r13], {1}") \ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]") \ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]") \ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]!") \ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]!") \ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #4") \ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #-4") \ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], {1}") \ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]") \ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]") \ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]!") \ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]!") \ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #4") \ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #-4") \ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], {1}") \ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]") \ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]") \ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]!") \ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]!") \ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #4") \ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #-4") \ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], {1}") \ \ TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #4]") \ TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) " @ stc"two" 0, cr0, [r15, #4]!") \ 
TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) " @ stc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) " @ stc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) " @ stc"two" 0, cr0, [r15], #-4") \ TEST_COPROCESSOR( "stc"two" p0, cr0, [r15], {1}") \ TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #4]") \ TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) " @ stc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) " @ stc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) " @ stc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) " @ stc"two"l 0, cr0, [r15], #-4") \ TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15], {1}") \ TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #4]") \ TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) " @ ldc"two" 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) " @ ldc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) " @ ldc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) " @ ldc"two" 0, cr0, [r15], #-4") \ TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15], {1}") \ TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #4]") \ TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) " @ ldc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) " @ ldc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) " @ ldc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) " @ ldc"two"l 0, cr0, [r15], #-4") \ TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15], {1}") #define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc) \ \ TEST_COPROCESSOR( "mcrr"two" p0, 15, r0, r14, cr0") \ TEST_COPROCESSOR( "mcrr"two" p15, 0, r14, r0, cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) " @ mcrr"two" 0, 15, r0, r15, cr0") \ 
TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) " @ mcrr"two" 15, 0, r15, r0, cr15") \ TEST_COPROCESSOR( "mrrc"two" p0, 15, r0, r14, cr0") \ TEST_COPROCESSOR( "mrrc"two" p15, 0, r14, r0, cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) " @ mrrc"two" 0, 15, r0, r15, cr0") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) " @ mrrc"two" 15, 0, r15, r0, cr15") \ TEST_COPROCESSOR( "cdp"two" p15, 15, cr15, cr15, cr15, 7") \ TEST_COPROCESSOR( "cdp"two" p0, 0, cr0, cr0, cr0, 0") \ TEST_COPROCESSOR( "mcr"two" p15, 7, r15, cr15, cr15, 7") \ TEST_COPROCESSOR( "mcr"two" p0, 0, r0, cr0, cr0, 0") \ TEST_COPROCESSOR( "mrc"two" p15, 7, r14, cr15, cr15, 7") \ TEST_COPROCESSOR( "mrc"two" p0, 0, r0, cr0, cr0, 0") COPROCESSOR_INSTRUCTIONS_ST_LD("",e) #if __LINUX_ARM_ARCH__ >= 5 COPROCESSOR_INSTRUCTIONS_MC_MR("",e) #endif TEST_UNSUPPORTED("svc 0") TEST_UNSUPPORTED("svc 0xffffff") TEST_UNSUPPORTED("svc 0") TEST_GROUP("Unconditional instruction") #if __LINUX_ARM_ARCH__ >= 6 TEST_UNSUPPORTED("srsda sp, 0x13") TEST_UNSUPPORTED("srsdb sp, 0x13") TEST_UNSUPPORTED("srsia sp, 0x13") TEST_UNSUPPORTED("srsib sp, 0x13") TEST_UNSUPPORTED("srsda sp!, 0x13") TEST_UNSUPPORTED("srsdb sp!, 0x13") TEST_UNSUPPORTED("srsia sp!, 0x13") TEST_UNSUPPORTED("srsib sp!, 0x13") TEST_UNSUPPORTED("rfeda sp") TEST_UNSUPPORTED("rfedb sp") TEST_UNSUPPORTED("rfeia sp") TEST_UNSUPPORTED("rfeib sp") TEST_UNSUPPORTED("rfeda sp!") TEST_UNSUPPORTED("rfedb sp!") TEST_UNSUPPORTED("rfeia sp!") TEST_UNSUPPORTED("rfeib sp!") TEST_UNSUPPORTED(__inst_arm(0xf81d0a00) " @ rfeda pc") TEST_UNSUPPORTED(__inst_arm(0xf91d0a00) " @ rfedb pc") TEST_UNSUPPORTED(__inst_arm(0xf89d0a00) " @ rfeia pc") TEST_UNSUPPORTED(__inst_arm(0xf99d0a00) " @ rfeib pc") TEST_UNSUPPORTED(__inst_arm(0xf83d0a00) " @ rfeda pc!") TEST_UNSUPPORTED(__inst_arm(0xf93d0a00) " @ rfedb pc!") TEST_UNSUPPORTED(__inst_arm(0xf8bd0a00) " @ rfeia pc!") TEST_UNSUPPORTED(__inst_arm(0xf9bd0a00) " @ rfeib pc!") #endif /* __LINUX_ARM_ARCH__ >= 6 */ #if __LINUX_ARM_ARCH__ >= 6 
TEST_X( "blx __dummy_thumb_subroutine_even", ".thumb \n\t" ".space 4 \n\t" ".type __dummy_thumb_subroutine_even, %%function \n\t" "__dummy_thumb_subroutine_even: \n\t" "mov r0, pc \n\t" "bx lr \n\t" ".arm \n\t" ) TEST( "blx __dummy_thumb_subroutine_even") TEST_X( "blx __dummy_thumb_subroutine_odd", ".thumb \n\t" ".space 2 \n\t" ".type __dummy_thumb_subroutine_odd, %%function \n\t" "__dummy_thumb_subroutine_odd: \n\t" "mov r0, pc \n\t" "bx lr \n\t" ".arm \n\t" ) TEST( "blx __dummy_thumb_subroutine_odd") #endif /* __LINUX_ARM_ARCH__ >= 6 */ #if __LINUX_ARM_ARCH__ >= 5 COPROCESSOR_INSTRUCTIONS_ST_LD("2",f) #endif #if __LINUX_ARM_ARCH__ >= 6 COPROCESSOR_INSTRUCTIONS_MC_MR("2",f) #endif TEST_GROUP("Miscellaneous instructions, memory hints, and Advanced SIMD instructions") #if __LINUX_ARM_ARCH__ >= 6 TEST_UNSUPPORTED("cps 0x13") TEST_UNSUPPORTED("cpsie i") TEST_UNSUPPORTED("cpsid i") TEST_UNSUPPORTED("cpsie i,0x13") TEST_UNSUPPORTED("cpsid i,0x13") TEST_UNSUPPORTED("setend le") TEST_UNSUPPORTED("setend be") #endif #if __LINUX_ARM_ARCH__ >= 7 TEST_P("pli [r",0,0b,", #16]") TEST( "pli [pc, #0]") TEST_RR("pli [r",12,0b,", r",0, 16,"]") TEST_RR("pli [r",0, 0b,", -r",12,16,", lsl #4]") #endif #if __LINUX_ARM_ARCH__ >= 5 TEST_P("pld [r",0,32,", #-16]") TEST( "pld [pc, #0]") TEST_PR("pld [r",7, 24, ", r",0, 16,"]") TEST_PR("pld [r",8, 24, ", -r",12,16,", lsl #4]") #endif #if __LINUX_ARM_ARCH__ >= 7 TEST_SUPPORTED( __inst_arm(0xf590f000) " @ pldw [r0, #0]") TEST_SUPPORTED( __inst_arm(0xf797f000) " @ pldw [r7, r0]") TEST_SUPPORTED( __inst_arm(0xf798f18c) " @ pldw [r8, r12, lsl #3]"); #endif #if __LINUX_ARM_ARCH__ >= 7 TEST_UNSUPPORTED("clrex") TEST_UNSUPPORTED("dsb") TEST_UNSUPPORTED("dmb") TEST_UNSUPPORTED("isb") #endif verbose("\n"); }
linux-master
arch/arm/probes/kprobes/test-arm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/checkers-arm.c
 *
 * ARM-mode kprobes "checkers": per-instruction-type callbacks that the
 * probes decoder runs to determine (a) how much stack below SP a probed
 * instruction may touch (arm_stack_checker) and (b) which core registers
 * it encodes (arm_regs_checker).  The results are stored in
 * arch_probes_insn and consumed elsewhere (e.g. by the optprobe code to
 * decide whether an instruction can be executed directly).
 *
 * Copyright (C) 2014 Huawei Inc.
 */

#include <linux/kernel.h>
#include "../decode.h"
#include "../decode-arm.h"
#include "checkers.h"

/*
 * Stack checker for store-capable instruction types.  The nested decode
 * table below maps each store encoding onto one of the STACK_USE_*
 * actions (implemented in checkers-common.c); any encoding not matched
 * falls through to STACK_USE_NONE.
 */
static enum probes_insn __kprobes arm_check_stack(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	/*
	 * PROBES_LDRSTRD, PROBES_LDMSTM, PROBES_STORE,
	 * PROBES_STORE_EXTRA may get here. Simply mark all normal
	 * insns as STACK_USE_NONE.
	 */
	static const union decode_item table[] = {
		/*
		 * 'STR{,D,B,H}, Rt, [Rn, Rm]' should be marked as UNKNOWN
		 * if Rn or Rm is SP.
		 *                                 x
		 * STR (register)	cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx
		 * STRB (register)	cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx
		 */
		/* Rm == SP (bits 0-3) or Rn == SP (bits 16-19) */
		DECODE_OR	(0x0e10000f, 0x0600000d),
		DECODE_OR	(0x0e1f0000, 0x060d0000),

		/*
		 *                                 x
		 * STRD (register)	cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx
		 * STRH (register)	cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx
		 */
		DECODE_OR	(0x0e5000bf, 0x000000bd),
		DECODE_CUSTOM	(0x0e5f00b0, 0x000d00b0, STACK_USE_UNKNOWN),

		/*
		 * For PROBES_LDMSTM, only stmdx sp, [...] need to examine
		 *
		 * Bit B/A (bit 24) encodes arithmetic operation order. 1 means
		 * before, 0 means after.
		 * Bit I/D (bit 23) encodes arithmetic operation. 1 means
		 * increment, 0 means decrement.
		 *
		 * So:
		 *                    B I
		 *                    / /
		 *                    A D   | Rn |
		 * STMDX SP, [...]	cccc 100x 00x0 xxxx xxxx xxxx xxxx xxxx
		 */
		DECODE_CUSTOM	(0x0edf0000, 0x080d0000, STACK_USE_STMDX),

		/*                              P U W | Rn | Rt |     imm12    |*/
		/* STR (immediate)	cccc 010x x0x0 1101 xxxx xxxx xxxx xxxx */
		/* STRB (immediate)	cccc 010x x1x0 1101 xxxx xxxx xxxx xxxx */
		/*                              P U W | Rn | Rt |imm4|    |imm4|*/
		/* STRD (immediate)	cccc 000x x1x0 1101 xxxx xxxx 1111 xxxx */
		/* STRH (immediate)	cccc 000x x1x0 1101 xxxx xxxx 1011 xxxx */
		/*
		 * index = (P == '1'); add = (U == '1').
		 * Above insns with:
		 *    index == 0 (str{,d,h} rx, [sp], #+/-imm) or
		 *    add == 1 (str{,d,h} rx, [sp, #+<imm>])
		 * should be STACK_USE_NONE.
		 * Only str{,b,d,h} rx,[sp,#-n] (P == 1 and U == 0) are
		 * required to be examined.
		 */
		/* STR{,B} Rt,[SP,#-n]	cccc 0101 0xx0 1101 xxxx xxxx xxxx xxxx */
		DECODE_CUSTOM	(0x0f9f0000, 0x050d0000, STACK_USE_FIXED_XXX),

		/* STR{D,H} Rt,[SP,#-n]	cccc 0001 01x0 1101 xxxx xxxx 1x11 xxxx */
		DECODE_CUSTOM	(0x0fdf00b0, 0x014d00b0, STACK_USE_FIXED_X0X),

		/* fall through */
		DECODE_CUSTOM	(0, 0, STACK_USE_NONE),
		DECODE_END
	};

	return probes_decode_insn(insn, asi, table, false, false,
				  stack_check_actions, NULL);
}

/* Only instruction types that can store need a stack checker. */
const struct decode_checker arm_stack_checker[NUM_PROBES_ARM_ACTIONS] = {
	[PROBES_LDRSTRD] = {.checker = arm_check_stack},
	[PROBES_STORE_EXTRA] = {.checker = arm_check_stack},
	[PROBES_STORE] = {.checker = arm_check_stack},
	[PROBES_LDMSTM] = {.checker = arm_check_stack},
};

/* Instruction encodes no general-purpose register (hint-type insns). */
static enum probes_insn __kprobes arm_check_regs_nouse(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	asi->register_usage_flags = 0;
	return INSN_GOOD;
}

/*
 * Generic register-usage scan.  The decode header's type_regs field
 * marks, per 4-bit nibble of the instruction, whether that nibble names
 * a register.  Walk the five low nibbles and set one bit in
 * register_usage_flags for each register the instruction encodes.
 */
static enum probes_insn arm_check_regs_normal(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;
	int i;

	asi->register_usage_flags = 0;
	for (i = 0; i < 5; regs >>= 4, insn >>= 4, i++)
		if (regs & 0xf)
			asi->register_usage_flags |= 1 << (insn & 0xf);

	return INSN_GOOD;
}

/* LDM/STM: uses the base register Rn plus every register in the list. */
static enum probes_insn arm_check_regs_ldmstm(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	unsigned int reglist = insn & 0xffff;
	unsigned int rn = (insn >> 16) & 0xf;

	asi->register_usage_flags = reglist | (1 << rn);
	return INSN_GOOD;
}

static enum probes_insn arm_check_regs_mov_ip_sp(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	/* Instruction is 'mov ip, sp' i.e. 'mov r12, r13' */
	asi->register_usage_flags = (1 << 12) | (1 << 13);
	return INSN_GOOD;
}

/*
 *                                    | Rn |Rt/d|         | Rm |
 * LDRD (register)      cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx
 * STRD (register)      cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx
 *                                    | Rn |Rt/d|         |imm4L|
 * LDRD (immediate)     cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx
 * STRD (immediate)     cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx
 *
 * Such instructions access Rt/d and its next register, so different
 * from others, a specific checker is required to handle this extra
 * implicit register usage.
 */
static enum probes_insn arm_check_regs_ldrdstrd(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	int rdt = (insn >> 12) & 0xf;

	/* Normal scan first, then add the implicit Rt+1 transfer register. */
	arm_check_regs_normal(insn, asi, h);
	asi->register_usage_flags |= 1 << (rdt + 1);
	return INSN_GOOD;
}

/* Per-instruction-type dispatch to the register-usage checkers above. */
const struct decode_checker arm_regs_checker[NUM_PROBES_ARM_ACTIONS] = {
	[PROBES_MRS] = {.checker = arm_check_regs_normal},
	[PROBES_SATURATING_ARITHMETIC] = {.checker = arm_check_regs_normal},
	[PROBES_MUL1] = {.checker = arm_check_regs_normal},
	[PROBES_MUL2] = {.checker = arm_check_regs_normal},
	[PROBES_MUL_ADD_LONG] = {.checker = arm_check_regs_normal},
	[PROBES_MUL_ADD] = {.checker = arm_check_regs_normal},
	[PROBES_LOAD] = {.checker = arm_check_regs_normal},
	[PROBES_LOAD_EXTRA] = {.checker = arm_check_regs_normal},
	[PROBES_STORE] = {.checker = arm_check_regs_normal},
	[PROBES_STORE_EXTRA] = {.checker = arm_check_regs_normal},
	[PROBES_DATA_PROCESSING_REG] = {.checker = arm_check_regs_normal},
	[PROBES_DATA_PROCESSING_IMM] = {.checker = arm_check_regs_normal},
	[PROBES_SEV] = {.checker = arm_check_regs_nouse},
	[PROBES_WFE] = {.checker = arm_check_regs_nouse},
	[PROBES_SATURATE] = {.checker = arm_check_regs_normal},
	[PROBES_REV] = {.checker = arm_check_regs_normal},
	[PROBES_MMI] = {.checker = arm_check_regs_normal},
	[PROBES_PACK] = {.checker = arm_check_regs_normal},
	[PROBES_EXTEND] = {.checker = arm_check_regs_normal},
	[PROBES_EXTEND_ADD] = {.checker = arm_check_regs_normal},
	[PROBES_BITFIELD] = {.checker = arm_check_regs_normal},
	[PROBES_LDMSTM] = {.checker = arm_check_regs_ldmstm},
	[PROBES_MOV_IP_SP] = {.checker = arm_check_regs_mov_ip_sp},
	[PROBES_LDRSTRD] = {.checker = arm_check_regs_ldrdstrd},
};
linux-master
arch/arm/probes/kprobes/checkers-arm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/checkers-common.c
 *
 * Decode actions shared by the ARM and Thumb stack checkers.  Each
 * action computes how many bytes below SP a matched store instruction
 * may write and records it in arch_probes_insn::stack_space
 * (-1 == statically unknowable).
 *
 * Copyright (C) 2014 Huawei Inc.
 */

#include <linux/kernel.h>
#include "../decode.h"
#include "../decode-arm.h"
#include "checkers.h"

/* Instruction cannot touch the stack below SP. */
enum probes_insn checker_stack_use_none(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	asi->stack_space = 0;
	return INSN_GOOD_NO_SLOT;
}

/* Stack consumption cannot be determined statically; flag it with -1. */
enum probes_insn checker_stack_use_unknown(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	asi->stack_space = -1;
	return INSN_GOOD_NO_SLOT;
}

#ifdef CONFIG_THUMB2_KERNEL
/* Thumb: offset is the unscaled imm8 in bits 0-7. */
enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	int imm = insn & 0xff;
	asi->stack_space = imm;
	return INSN_GOOD_NO_SLOT;
}

/*
 * Different from other insn uses imm8, the real addressing offset of
 * STRD in T32 encoding should be imm8 * 4. See ARMARM description.
 */
static enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	int imm = insn & 0xff;
	asi->stack_space = imm << 2;
	return INSN_GOOD_NO_SLOT;
}
#else
/* ARM: split immediate, imm4H in bits 8-11 and imm4L in bits 0-3. */
enum probes_insn checker_stack_use_imm_x0x(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	int imm = ((insn & 0xf00) >> 4) + (insn & 0xf);
	asi->stack_space = imm;
	return INSN_GOOD_NO_SLOT;
}
#endif

/* Full 12-bit immediate offset in bits 0-11. */
enum probes_insn checker_stack_use_imm_xxx(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	int imm = insn & 0xfff;
	asi->stack_space = imm;
	return INSN_GOOD_NO_SLOT;
}

/*
 * STMDX SP, {reglist}: 4 bytes per register in the list.  When the
 * P bit (bit 24, pre-index) is clear the first store lands at [sp]
 * itself, so one word less below SP is touched.
 */
enum probes_insn checker_stack_use_stmdx(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	unsigned int reglist = insn & 0xffff;
	int pbit = insn & (1 << 24);
	asi->stack_space = (hweight32(reglist) - (!pbit ? 1 : 0)) * 4;

	return INSN_GOOD_NO_SLOT;
}

/* Dispatch table mapping STACK_USE_* decode actions to the checkers above. */
const union decode_action stack_check_actions[] = {
	[STACK_USE_NONE] = {.decoder = checker_stack_use_none},
	[STACK_USE_UNKNOWN] = {.decoder = checker_stack_use_unknown},
#ifdef CONFIG_THUMB2_KERNEL
	[STACK_USE_FIXED_0XX] = {.decoder = checker_stack_use_imm_0xx},
	[STACK_USE_T32STRD] = {.decoder = checker_stack_use_t32strd},
#else
	[STACK_USE_FIXED_X0X] = {.decoder = checker_stack_use_imm_x0x},
#endif
	[STACK_USE_FIXED_XXX] = {.decoder = checker_stack_use_imm_xxx},
	[STACK_USE_STMDX] = {.decoder = checker_stack_use_stmdx},
};
linux-master
arch/arm/probes/kprobes/checkers-common.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * ARM implementation: replaces a kprobe breakpoint with a relative
 * branch into a per-probe copy of the trampoline template below, which
 * builds a pt_regs frame on the stack and calls optimized_callback()
 * without taking an exception.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright (C) Huawei Inc., 2014
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
/* for arm_gen_branch */
#include <asm/insn.h>
/* for patch_text */
#include <asm/patch.h>

#include "core.h"

/*
 * See register_usage_flags. If the probed instruction doesn't use PC,
 * we can copy it into template and have it executed directly without
 * simulation or emulation.
 */
#define ARM_REG_PC	15
#define can_kprobe_direct_exec(m)	(!test_bit(ARM_REG_PC, &(m)))

/*
 * NOTE: the first sub and add instruction will be modified according
 * to the stack cost of the instruction.
 */
asm (
			".global optprobe_template_entry\n"
			"optprobe_template_entry:\n"
			".global optprobe_template_sub_sp\n"
			"optprobe_template_sub_sp:"
			"	sub	sp, sp, #0xff\n"
			"	stmia	sp, {r0 - r14} \n"
			".global optprobe_template_add_sp\n"
			"optprobe_template_add_sp:"
			"	add	r3, sp, #0xff\n"
			"	str	r3, [sp, #52]\n"
			"	mrs	r4, cpsr\n"
			"	str	r4, [sp, #64]\n"
			"	mov	r1, sp\n"
			"	ldr	r0, 1f\n"
			"	ldr	r2, 2f\n"
			/*
			 * AEABI requires an 8-bytes alignment stack. If
			 * SP % 8 != 0 (SP % 4 == 0 should be ensured),
			 * alloc more bytes here.
			 */
			"	and	r4, sp, #4\n"
			"	sub	sp, sp, r4\n"
#if __LINUX_ARM_ARCH__ >= 5
			"	blx	r2\n"
#else
			"	mov     lr, pc\n"
			"	mov	pc, r2\n"
#endif
			"	add	sp, sp, r4\n"
			"	ldr	r1, [sp, #64]\n"
			"	tst	r1, #"__stringify(PSR_T_BIT)"\n"
			"	ldrne	r2, [sp, #60]\n"
			"	orrne	r2, #1\n"
			"	strne	r2, [sp, #60]	@ set bit0 of PC for thumb\n"
			"	msr	cpsr_cxsf, r1\n"
			".global optprobe_template_restore_begin\n"
			"optprobe_template_restore_begin:\n"
			"	ldmia	sp, {r0 - r15}\n"
			".global optprobe_template_restore_orig_insn\n"
			"optprobe_template_restore_orig_insn:\n"
			"	nop\n"
			".global optprobe_template_restore_end\n"
			"optprobe_template_restore_end:\n"
			"	nop\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			"1:	.long 0\n"
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			"2:	.long 0\n"
			".global optprobe_template_end\n"
			"optprobe_template_end:\n");

/* Word offsets of the patchable slots within the template above. */
#define TMPL_VAL_IDX \
	((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
#define TMPL_END_IDX \
	((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
#define TMPL_ADD_SP \
	((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_SUB_SP \
	((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_BEGIN \
	((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_ORIGN_INSN \
	((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_END \
	((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)

/*
 * ARM can always optimize an instruction when using ARM ISA, except
 * instructions like 'str r0, [sp, r1]' which store to stack and unable
 * to determine stack space consumption statically.
 */
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	/* An optprobe counts as prepared once a code slot is assigned. */
	return optinsn->insn != NULL;
}

/*
 * In ARM ISA, kprobe opt always replace one instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in the address range. So always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

/* Caller must ensure addr & 3 == 0 */
static int can_optimize(struct kprobe *kp)
{
	/* stack_space < 0 means the checkers could not bound stack use. */
	if (kp->ainsn.stack_space < 0)
		return 0;
	/*
	 * 255 is the biggest imm can be used in 'sub r0, r0, #<imm>'.
	 * Number larger than 255 needs special encoding.
	 */
	if (kp->ainsn.stack_space > 255 - sizeof(struct pt_regs))
		return 0;
	return 1;
}

/* Free optimized instruction slot */
static void
__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
	}
}

/*
 * Entered from the trampoline with a pt_regs frame built on the stack.
 * Completes the frame (pc, orig_r0), then runs the probe's pre-handler
 * unless another kprobe is already active on this CPU, and finally
 * single-steps the replaced instruction when it could not be copied
 * into the template for direct execution.
 */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	unsigned long flags;
	struct kprobe *p = &op->kp;
	struct kprobe_ctlblk *kcb;

	/* Save skipped registers */
	regs->ARM_pc = (unsigned long)op->kp.addr;
	regs->ARM_ORIG_r0 = ~0UL;

	local_irq_save(flags);
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	/*
	 * We singlestep the replaced instruction only when it can't be
	 * executed directly during restore.
	 */
	if (!p->ainsn.kprobe_direct_exec)
		op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);

	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback)

/*
 * Allocate a code slot, copy in the trampoline template and patch its
 * per-probe slots (stack reservation, op pointer, callback address,
 * optional direct-executed copy of the probed instruction).
 * Returns 0 on success, -EILSEQ if the instruction cannot be optimized,
 * -ENOMEM if no slot is available, -ERANGE if the slot is out of
 * branch range from the probe address.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
{
	kprobe_opcode_t *code;
	unsigned long rel_chk;
	unsigned long val;
	unsigned long stack_protect = sizeof(struct pt_regs);

	if (!can_optimize(orig))
		return -EILSEQ;

	code = get_optinsn_slot();
	if (!code)
		return -ENOMEM;

	/*
	 * Verify if the address gap is in 32MiB range, because this uses
	 * a relative jump.
	 *
	 * kprobe opt use a 'b' instruction to branch to optinsn.insn.
	 * According to ARM manual, branch instruction is:
	 *
	 *   31  28 27           24 23             0
	 *   +------+---+---+---+---+----------------+
	 *   | cond | 1 | 0 | 1 | 0 |      imm24     |
	 *   +------+---+---+---+---+----------------+
	 *
	 * imm24 is a signed 24 bits integer. The real branch offset is computed
	 * by: imm32 = SignExtend(imm24:'00', 32);
	 *
	 * So the maximum forward branch should be:
	 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
	 * The maximum backward branch should be:
	 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
	 *
	 * We can simply check (rel & 0xfe000003):
	 *  if rel is positive, (rel & 0xfe000000) should be 0
	 *  if rel is negative, (rel & 0xfe000000) should be 0xfe000000
	 *  the last '3' is used for alignment checking.
	 */
	rel_chk = (unsigned long)((long)code -
			(long)orig->addr + 8) & 0xfe000003;

	if ((rel_chk != 0) && (rel_chk != 0xfe000000)) {
		/*
		 * Different from x86, we free code buf directly instead of
		 * calling __arch_remove_optimized_kprobe() because
		 * we have not fill any field in op.
		 */
		free_optinsn_slot(code, 0);
		return -ERANGE;
	}

	/* Copy arch-dep-instance from template. */
	memcpy(code, (unsigned long *)optprobe_template_entry,
			TMPL_END_IDX * sizeof(kprobe_opcode_t));

	/* Adjust buffer according to instruction. */
	BUG_ON(orig->ainsn.stack_space < 0);

	stack_protect += orig->ainsn.stack_space;

	/* Should have been filtered by can_optimize(). */
	BUG_ON(stack_protect > 255);

	/* Create a 'sub sp, sp, #<stack_protect>' */
	code[TMPL_SUB_SP] = __opcode_to_mem_arm(0xe24dd000 | stack_protect);
	/* Create a 'add r3, sp, #<stack_protect>' */
	code[TMPL_ADD_SP] = __opcode_to_mem_arm(0xe28d3000 | stack_protect);

	/* Set probe information */
	val = (unsigned long)op;
	code[TMPL_VAL_IDX] = val;

	/* Set probe function call */
	val = (unsigned long)optimized_callback;
	code[TMPL_CALL_IDX] = val;

	/* If possible, copy insn and have it executed during restore */
	orig->ainsn.kprobe_direct_exec = false;
	if (can_kprobe_direct_exec(orig->ainsn.register_usage_flags)) {
		kprobe_opcode_t final_branch = arm_gen_branch(
				(unsigned long)(&code[TMPL_RESTORE_END]),
				(unsigned long)(op->kp.addr) + 4);
		if (final_branch != 0) {
			/*
			 * Replace original 'ldmia sp, {r0 - r15}' with
			 * 'ldmia {r0 - r14}', restore all registers except pc.
			 */
			code[TMPL_RESTORE_BEGIN] = __opcode_to_mem_arm(0xe89d7fff);

			/* The original probed instruction */
			code[TMPL_RESTORE_ORIGN_INSN] = __opcode_to_mem_arm(orig->opcode);

			/* Jump back to next instruction */
			code[TMPL_RESTORE_END] = __opcode_to_mem_arm(final_branch);
			orig->ainsn.kprobe_direct_exec = true;
		}
	}

	flush_icache_range((unsigned long)code,
			   (unsigned long)(&code[TMPL_END_IDX]));

	/* Set op->optinsn.insn means prepared. */
	op->optinsn.insn = code;
	return 0;
}

/*
 * Arm each listed optprobe: save the original instruction, then patch
 * the probe site with a (condition-preserving) branch to the trampoline.
 */
void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		unsigned long insn;
		WARN_ON(kprobe_disabled(&op->kp));

		/*
		 * Backup instructions which will be replaced
		 * by jump address
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
				RELATIVEJUMP_SIZE);

		insn = arm_gen_branch((unsigned long)op->kp.addr,
				(unsigned long)op->optinsn.insn);
		BUG_ON(insn == 0);

		/*
		 * Make it a conditional branch if replaced insn
		 * is conditional
		 */
		insn = (__mem_to_opcode_arm(
			op->optinsn.copied_insn[0]) & 0xf0000000) |
			(insn & 0x0fffffff);

		/*
		 * Similar to __arch_disarm_kprobe, operations which
		 * removing breakpoints must be wrapped by stop_machine
		 * to avoid racing.
		 */
		kprobes_remove_breakpoint(op->kp.addr, insn);

		list_del_init(&op->list);
	}
}

/* Revert one optprobe back to an ordinary breakpoint-based kprobe. */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must call with locking kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
			    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

/* Does addr fall within the instruction(s) replaced by this optprobe? */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				kprobe_opcode_t *addr)
{
	return ((unsigned long)op->kp.addr <= (unsigned long)addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE >
		(unsigned long)addr) ? 1 : 0;
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
linux-master
arch/arm/probes/kprobes/opt-arm.c
// SPDX-License-Identifier: GPL-2.0-only /* * Just-In-Time compiler for eBPF filters on 32bit ARM * * Copyright (c) 2017 Shubham Bansal <[email protected]> * Copyright (c) 2011 Mircea Gherzan <[email protected]> */ #include <linux/bpf.h> #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/errno.h> #include <linux/filter.h> #include <linux/netdevice.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include <asm/cacheflush.h> #include <asm/hwcap.h> #include <asm/opcodes.h> #include <asm/system_info.h> #include "bpf_jit_32.h" /* * eBPF prog stack layout: * * high * original ARM_SP => +-----+ * | | callee saved registers * +-----+ <= (BPF_FP + SCRATCH_SIZE) * | ... | eBPF JIT scratch space * eBPF fp register => +-----+ * (BPF_FP) | ... | eBPF prog stack * +-----+ * |RSVD | JIT scratchpad * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE) * | ... | caller-saved registers * +-----+ * | ... | arguments passed on stack * ARM_SP during call => +-----| * | | * | ... | Function call stack * | | * +-----+ * low * * The callee saved registers depends on whether frame pointers are enabled. * With frame pointers (to be compliant with the ABI): * * high * original ARM_SP => +--------------+ \ * | pc | | * current ARM_FP => +--------------+ } callee saved registers * |r4-r9,fp,ip,lr| | * +--------------+ / * low * * Without frame pointers: * * high * original ARM_SP => +--------------+ * | r4-r9,fp,lr | callee saved registers * current ARM_FP => +--------------+ * low * * When popping registers off the stack at the end of a BPF function, we * reference them via the current ARM_FP register. * * Some eBPF operations are implemented via a call to a helper function. * Such calls are "invisible" in the eBPF code, so it is up to the calling * program to preserve any caller-saved ARM registers during the call. The * JIT emits code to push and pop those registers onto the stack, immediately * above the callee stack frame. 
*/ #define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \ 1 << ARM_FP) #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) #define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3) enum { /* Stack layout - these are offsets from (top of stack - 4) */ BPF_R2_HI, BPF_R2_LO, BPF_R3_HI, BPF_R3_LO, BPF_R4_HI, BPF_R4_LO, BPF_R5_HI, BPF_R5_LO, BPF_R7_HI, BPF_R7_LO, BPF_R8_HI, BPF_R8_LO, BPF_R9_HI, BPF_R9_LO, BPF_FP_HI, BPF_FP_LO, BPF_TC_HI, BPF_TC_LO, BPF_AX_HI, BPF_AX_LO, /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, * BPF_REG_FP and Tail call counts. */ BPF_JIT_SCRATCH_REGS, }; /* * Negative "register" values indicate the register is stored on the stack * and are the offset from the top of the eBPF JIT scratch space. */ #define STACK_OFFSET(k) (-4 - (k) * 4) #define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4) #ifdef CONFIG_FRAME_POINTER #define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4) #else #define EBPF_SCRATCH_TO_ARM_FP(x) (x) #endif #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ #define FLAG_IMM_OVERFLOW (1 << 0) /* * Map eBPF registers to ARM 32bit registers or stack scratch space. * * 1. First argument is passed using the arm 32bit registers and rest of the * arguments are passed on stack scratch space. * 2. First callee-saved argument is mapped to arm 32 bit registers and rest * arguments are mapped to scratch space on stack. * 3. We need two 64 bit temp registers to do complex operations on eBPF * registers. 
* * As the eBPF registers are all 64 bit registers and arm has only 32 bit * registers, we have to map each eBPF registers with two arm 32 bit regs or * scratch memory space and we have to build eBPF 64 bit register from those. * */ static const s8 bpf2a32[][2] = { /* return value from in-kernel function, and exit value from eBPF */ [BPF_REG_0] = {ARM_R1, ARM_R0}, /* arguments from eBPF program to in-kernel function */ [BPF_REG_1] = {ARM_R3, ARM_R2}, /* Stored on stack scratch space */ [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)}, [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)}, [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)}, [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)}, /* callee saved registers that in-kernel function will preserve */ [BPF_REG_6] = {ARM_R5, ARM_R4}, /* Stored on stack scratch space */ [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)}, [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)}, [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)}, /* Read only Frame Pointer to access Stack */ [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)}, /* Temporary Register for BPF JIT, can be used * for constant blindings and others. */ [TMP_REG_1] = {ARM_R7, ARM_R6}, [TMP_REG_2] = {ARM_R9, ARM_R8}, /* Tail call count. Stored on stack scratch space. */ [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)}, /* temporary register for blinding constants. * Stored on stack scratch space. */ [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)}, }; #define dst_lo dst[1] #define dst_hi dst[0] #define src_lo src[1] #define src_hi src[0] /* * JIT Context: * * prog : bpf_prog * idx : index of current last JITed instruction. * prologue_bytes : bytes used in prologue. * epilogue_offset : offset of epilogue starting. * offsets : array of eBPF instruction offsets in * JITed code. * target : final JITed code. 
* epilogue_bytes : no of bytes used in epilogue. * imm_count : no of immediate counts used for global * variables. * imms : array of global variable addresses. */ struct jit_ctx { const struct bpf_prog *prog; unsigned int idx; unsigned int prologue_bytes; unsigned int epilogue_offset; unsigned int cpu_architecture; u32 flags; u32 *offsets; u32 *target; u32 stack_size; #if __LINUX_ARM_ARCH__ < 7 u16 epilogue_bytes; u16 imm_count; u32 *imms; #endif }; /* * Wrappers which handle both OABI and EABI and assures Thumb2 interworking * (where the assembly routines like __aeabi_uidiv could cause problems). */ static u32 jit_udiv32(u32 dividend, u32 divisor) { return dividend / divisor; } static u32 jit_mod32(u32 dividend, u32 divisor) { return dividend % divisor; } static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx) { inst |= (cond << 28); inst = __opcode_to_mem_arm(inst); if (ctx->target != NULL) ctx->target[ctx->idx] = inst; ctx->idx++; } /* * Emit an instruction that will be executed unconditionally. */ static inline void emit(u32 inst, struct jit_ctx *ctx) { _emit(ARM_COND_AL, inst, ctx); } /* * This is rather horrid, but necessary to convert an integer constant * to an immediate operand for the opcodes, and be able to detect at * build time whether the constant can't be converted (iow, usable in * BUILD_BUG_ON()). 
*/ #define imm12val(v, s) (rol32(v, (s)) | (s) << 7) #define const_imm8m(x) \ ({ int r; \ u32 v = (x); \ if (!(v & ~0x000000ff)) \ r = imm12val(v, 0); \ else if (!(v & ~0xc000003f)) \ r = imm12val(v, 2); \ else if (!(v & ~0xf000000f)) \ r = imm12val(v, 4); \ else if (!(v & ~0xfc000003)) \ r = imm12val(v, 6); \ else if (!(v & ~0xff000000)) \ r = imm12val(v, 8); \ else if (!(v & ~0x3fc00000)) \ r = imm12val(v, 10); \ else if (!(v & ~0x0ff00000)) \ r = imm12val(v, 12); \ else if (!(v & ~0x03fc0000)) \ r = imm12val(v, 14); \ else if (!(v & ~0x00ff0000)) \ r = imm12val(v, 16); \ else if (!(v & ~0x003fc000)) \ r = imm12val(v, 18); \ else if (!(v & ~0x000ff000)) \ r = imm12val(v, 20); \ else if (!(v & ~0x0003fc00)) \ r = imm12val(v, 22); \ else if (!(v & ~0x0000ff00)) \ r = imm12val(v, 24); \ else if (!(v & ~0x00003fc0)) \ r = imm12val(v, 26); \ else if (!(v & ~0x00000ff0)) \ r = imm12val(v, 28); \ else if (!(v & ~0x000003fc)) \ r = imm12val(v, 30); \ else \ r = -1; \ r; }) /* * Checks if immediate value can be converted to imm12(12 bits) value. */ static int imm8m(u32 x) { u32 rot; for (rot = 0; rot < 16; rot++) if ((x & ~ror32(0xff, 2 * rot)) == 0) return rol32(x, 2 * rot) | (rot << 8); return -1; } #define imm8m(x) (__builtin_constant_p(x) ? 
const_imm8m(x) : imm8m(x)) static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12) { op |= rt << 12 | rn << 16; if (imm12 >= 0) op |= ARM_INST_LDST__U; else imm12 = -imm12; return op | (imm12 & ARM_INST_LDST__IMM12); } static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8) { op |= rt << 12 | rn << 16; if (imm8 >= 0) op |= ARM_INST_LDST__U; else imm8 = -imm8; return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f); } #define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off) #define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off) #define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off) #define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off) #define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off) #define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off) #define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off) #define ARM_STRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off) /* * Initializes the JIT space with undefined instructions. */ static void jit_fill_hole(void *area, unsigned int size) { u32 *ptr; /* We are guaranteed to have aligned memory. 
*/ for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); } #if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) /* EABI requires the stack to be aligned to 64-bit boundaries */ #define STACK_ALIGNMENT 8 #else /* Stack must be aligned to 32-bit boundaries */ #define STACK_ALIGNMENT 4 #endif /* total stack size used in JITed code */ #define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE) #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) #if __LINUX_ARM_ARCH__ < 7 static u16 imm_offset(u32 k, struct jit_ctx *ctx) { unsigned int i = 0, offset; u16 imm; /* on the "fake" run we just count them (duplicates included) */ if (ctx->target == NULL) { ctx->imm_count++; return 0; } while ((i < ctx->imm_count) && ctx->imms[i]) { if (ctx->imms[i] == k) break; i++; } if (ctx->imms[i] == 0) ctx->imms[i] = k; /* constants go just after the epilogue */ offset = ctx->offsets[ctx->prog->len - 1] * 4; offset += ctx->prologue_bytes; offset += ctx->epilogue_bytes; offset += i * 4; ctx->target[offset / 4] = k; /* PC in ARM mode == address of the instruction + 8 */ imm = offset - (8 + ctx->idx * 4); if (imm & ~0xfff) { /* * literal pool is too far, signal it into flags. we * can only detect it on the second pass unfortunately. */ ctx->flags |= FLAG_IMM_OVERFLOW; return 0; } return imm; } #endif /* __LINUX_ARM_ARCH__ */ static inline int bpf2a32_offset(int bpf_to, int bpf_from, const struct jit_ctx *ctx) { int to, from; if (ctx->target == NULL) return 0; to = ctx->offsets[bpf_to]; from = ctx->offsets[bpf_from]; return to - from - 1; } /* * Move an immediate that's not an imm8m to a core register. 
 */
static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	/* pre-v7: load the constant from the literal pool via PC */
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	/* v7+: MOVW/MOVT pair builds any 32-bit constant */
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

/* Move a 32-bit immediate to a core register, preferring a single MOV
 * when the value fits the imm8m (rotated 8-bit) encoding.
 */
static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

/* Branch to the address in tgt_reg; use BX when the CPU supports Thumb
 * interworking, otherwise a plain MOV to PC.
 */
static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
}

/* Branch-with-link to the address in tgt_reg (open-coded on pre-v5,
 * which lacks BLX).
 */
static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
	emit_bx_r(tgt_reg, ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

/* Branch displacement from the current instruction to the epilogue. */
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to, from;
	/* No need for 1st dummy run */
	if (ctx->target == NULL)
		return 0;
	to = ctx->epilogue_offset;
	from = ctx->idx;

	return to - from - 2;
}

/* Emit an unsigned 32-bit divide (op == BPF_DIV) or modulo of rm by rn
 * into rd.  Uses the hardware UDIV/MLS when available, otherwise calls
 * out to jit_udiv32/jit_mod32 with the AAPCS calling convention.
 */
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
	const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
	const s8 *tmp = bpf2a32[TMP_REG_1];

#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			/* rem = rm - (rm / rn) * rn */
			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions
	 * As ARM_R1 and ARM_R0 contains 1st argument of bpf
	 * function, we need to save it on caller side to save
	 * it from getting destroyed within callee.
	 * After the return from the callee, we restore ARM_R0
	 * ARM_R1.
	 */
	if (rn != ARM_R1) {
		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	}
	if (rm != ARM_R0) {
		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	}

	/* Push caller-saved registers on stack */
	emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);

	/* Call appropriate function */
	emit_mov_i(ARM_IP, op == BPF_DIV ?
		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Restore caller-saved registers from stack */
	emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);

	/* Save return value */
	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);

	/* Restore ARM_R0 and ARM_R1 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}

/* Is the translated BPF register on stack? */
static bool is_stacked(s8 reg)
{
	return reg < 0;
}

/* If a BPF register is on the stack (stk is true), load it to the
 * supplied temporary register and return the temporary register
 * for subsequent operations, otherwise just use the CPU register.
 */
static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
{
	if (is_stacked(reg)) {
		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
		reg = tmp;
	}
	return reg;
}

/* 64-bit counterpart of arm_bpf_get_reg32(): loads both halves from the
 * stack scratch area when needed, using LDRD where the core supports it.
 */
static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
				   struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_LDRD_I(tmp[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_LDR_I(tmp[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_LDR_I(tmp[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
		reg = tmp;
	}
	return reg;
}

/* If a BPF register is on the stack (stk is true), save the register
 * back to the stack. If the source register is not the same, then
 * move it into the correct register.
 */
static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
{
	if (is_stacked(reg))
		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
	else if (reg != src)
		emit(ARM_MOV_R(reg, src), ctx);
}

/* 64-bit counterpart of arm_bpf_put_reg32(): stores both halves to the
 * stack scratch area when the BPF register is stacked, using STRD where
 * the core supports it.
 */
static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
			      struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_STRD_I(src[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_STR_I(src[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_STR_I(src[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
	} else {
		if (reg[1] != src[1])
			emit(ARM_MOV_R(reg[1], src[1]), ctx);
		if (reg[0] != src[0])
			emit(ARM_MOV_R(reg[0], src[0]), ctx);
	}
}

/* Load a 32-bit immediate into (possibly stacked) BPF register 'dst'. */
static inline void emit_a32_mov_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];

	if (is_stacked(dst)) {
		emit_mov_i(tmp[1], val, ctx);
		arm_bpf_put_reg32(dst, tmp[1], ctx);
	} else {
		emit_mov_i(dst, val, ctx);
	}
}

/* Load a 64-bit immediate into the BPF register pair 'dst'
 * (dst[1] = low word, dst[0] = high word).
 */
static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;

	emit_mov_i(rd[1], (u32)val, ctx);
	emit_mov_i(rd[0], val >> 32, ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* Sign extended move */
static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
				       const u32 val, struct jit_ctx *ctx) {
	u64 val64 = val;

	/* sign-extend a negative 32-bit immediate into the high word */
	if (is64 && (val & (1<<31)))
		val64 |= 0xffffffff00000000ULL;
	emit_a32_mov_i64(dst, val64, ctx);
}

/* Emit one half of a (possibly 64-bit) register add; the low half uses
 * ADDS so the carry propagates into the high half's ADC.
 */
static inline void emit_a32_add_r(const u8 dst, const u8 src,
			      const bool is64, const bool hi,
			      struct jit_ctx *ctx) {
	/* 64 bit :
	 *	adds dst_lo, dst_lo, src_lo
	 *	adc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	add dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_ADDS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_ADC_R(dst, dst, src), ctx);
	else
		emit(ARM_ADD_R(dst, dst, src), ctx);
}

/* Emit one half of a (possibly 64-bit) register subtract; the low half
 * uses SUBS so the borrow propagates into the high half's SBC.
 */
static inline void emit_a32_sub_r(const u8 dst, const u8 src,
			      const bool is64, const bool hi,
			      struct jit_ctx *ctx) {
	/* 64 bit :
	 *	subs dst_lo, dst_lo, src_lo
	 *	sbc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	sub dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_SUBS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_SBC_R(dst, dst, src), ctx);
	else
		emit(ARM_SUB_R(dst, dst, src), ctx);
}

/* Dispatch one 32-bit half of a register-register ALU op on CPU regs. */
static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
			      const bool hi, const u8 op, struct jit_ctx *ctx){
	switch (BPF_OP(op)) {
	/* dst = dst + src */
	case BPF_ADD:
		emit_a32_add_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit_a32_sub_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ARM_ORR_R(dst, dst, src), ctx);
		break;
	/* dst = dst & src */
	case BPF_AND:
		emit(ARM_AND_R(dst, dst, src), ctx);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ARM_EOR_R(dst, dst, src), ctx);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		emit(ARM_MUL(dst, dst, src), ctx);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ARM_LSL_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ARM_LSR_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src (signed)*/
	case BPF_ARSH:
		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
		break;
	}
}

/* ALU operation (64 bit) */
static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
				  const s8 src[], struct jit_ctx *ctx,
				  const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	if (is64) {
		const s8 *rs;

		rs = arm_bpf_get_reg64(src, tmp2, ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs[1], true, false, op, ctx);
		emit_alu_r(rd[0], rs[0], true, true, op, ctx);
	} else {
		s8 rs;

		rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs, true, false, op, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = src (4 bytes)*/
static inline void emit_a32_mov_r(const s8 dst, const s8 src,
				  struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rt;

	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
	arm_bpf_put_reg32(dst, rt, ctx);
}

/* dst = src */
static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
				  const s8 src[],
				  struct jit_ctx *ctx) {
	if (!is64) {
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else if (__LINUX_ARM_ARCH__ < 6 &&
		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		emit_a32_mov_r(dst_hi, src_hi, ctx);
	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
		const u8 *tmp = bpf2a32[TMP_REG_1];

		/* stack-to-stack: bounce through the temp pair with LDRD/STRD */
		emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
		emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else if (is_stacked(src_lo)) {
		emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
	} else if (is_stacked(dst_lo)) {
		emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else {
		emit(ARM_MOV_R(dst[0], src[0]), ctx);
		emit(ARM_MOV_R(dst[1], src[1]), ctx);
	}
}

/* Shift operations */
static inline
void emit_a32_alu_i(const s8 dst, const u32 val,
		    struct jit_ctx *ctx, const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);

	/* Do shift operation */
	switch (op) {
	case BPF_LSH:
		emit(ARM_LSL_I(rd, rd, val), ctx);
		break;
	case BPF_RSH:
		emit(ARM_LSR_I(rd, rd, val), ctx);
		break;
	case BPF_ARSH:
		emit(ARM_ASR_I(rd, rd, val), ctx);
		break;
	case BPF_NEG:
		/* rd = val - rd (callers pass val == 0 to negate) */
		emit(ARM_RSB_I(rd, rd, val), ctx);
		break;
	}

	arm_bpf_put_reg32(dst, rd, ctx);
}

/* dst = ~dst (64 bit) */
static inline void emit_a32_neg64(const s8 dst[],
		struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd;

	/* Setup Operand */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do Negate Operation */
	emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
	emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst << src */
static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst >> src (signed)*/
static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
				     struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do the ARSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	/* only fold in the >=32 part when the shift really is >= 32 */
	_emit(ARM_COND_PL,
	      ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst >> src */
static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do RSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst << val */
static inline void emit_a32_lsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
		emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
	} else {
		if (val == 32)
			emit(ARM_MOV_R(rd[0], rd[1]), ctx);
		else
			emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
		/* low word becomes zero for shifts >= 32 */
		emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst >> val */
static inline void emit_a32_rsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSR operation */
	if (val == 0) {
		/* An immediate value of 0 encodes a shift amount of 32
		 * for LSR. To shift by 0, don't do anything.
		 */
	} else if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst >> val (signed) */
static inline void emit_a32_arsh_i64(const s8 dst[],
				     const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do ARSH operation */
	if (val == 0) {
		/* An immediate value of 0 encodes a shift amount of 32
		 * for ASR. To shift by 0, don't do anything.
		 */
	} else if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
		/* high word is filled with the sign bit */
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* 64x64 -> 64 multiply: dst = dst * src (low 64 bits of the product). */
static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd, *rt;

	/* Setup operands for multiplication */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	/* Do Multiplication */
	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);

	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
	emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
	arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}

/* Can 'off' be encoded as the immediate of a size-'size' load/store? */
static bool is_ldst_imm(s16 off, const u8 size)
{
	s16 off_max = 0;

	switch (size) {
	case BPF_B:
	case BPF_W:
		off_max = 0xfff;
		break;
	case BPF_H:
		off_max = 0xff;
		break;
	case BPF_DW:
		/* Need to make sure off+4 does not overflow.
		 */
		off_max = 0xfff - 4;
		break;
	}
	return -off_max <= off && off <= off_max;
}

/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
			      s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[1], ctx);

	if (!is_ldst_imm(off, sz)) {
		/* offset too large for an immediate: materialize the address */
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
		rd = tmp[0];
		off = 0;
	}
	switch (sz) {
	case BPF_B:
		/* Store a Byte */
		emit(ARM_STRB_I(src_lo, rd, off), ctx);
		break;
	case BPF_H:
		/* Store a HalfWord */
		emit(ARM_STRH_I(src_lo, rd, off), ctx);
		break;
	case BPF_W:
		/* Store a Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		break;
	case BPF_DW:
		/* Store a Double Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
		break;
	}
}

/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
			      s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
	s8 rm = src;

	if (!is_ldst_imm(off, sz)) {
		/* offset too large for an immediate: materialize the address */
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
		rm = tmp[0];
		off = 0;
	} else if (rd[1] == rm) {
		/* destination would clobber the base: copy the base first */
		emit(ARM_MOV_R(tmp[0], rm), ctx);
		rm = tmp[0];
	}
	switch (sz) {
	case BPF_B:
		/* Load a Byte */
		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_H:
		/* Load a HalfWord */
		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_DW:
		/* Load a Double Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
		break;
	}
	arm_bpf_put_reg64(dst, rd, ctx);
}

/* Arithmatic Operation */
/* Emit the flag-setting comparison for a conditional jump.
 * (rd,rt) is the dst pair (hi,lo) and (rm,rn) the src pair (hi,lo).
 * Note the operands are swapped for BPF_JSLE/BPF_JSGT, so the caller
 * branches on the same condition codes as for BPF_JSLT/BPF_JSGE.
 */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
			     const u8 rn, struct jit_ctx *ctx, u8 op,
			     bool is_jmp64) {
	switch (op) {
	case BPF_JSET:
		if (is_jmp64) {
			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
			emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
			emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
		} else {
			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
		}
		break;
	case BPF_JEQ:
	case BPF_JNE:
	case BPF_JGT:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JLT:
		if (is_jmp64) {
			emit(ARM_CMP_R(rd, rm), ctx);
			/* Only compare low halve if high halve are equal. */
			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
		} else {
			emit(ARM_CMP_R(rt, rn), ctx);
		}
		break;
	case BPF_JSLE:
	case BPF_JSGT:
		emit(ARM_CMP_R(rn, rt), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
		break;
	case BPF_JSLT:
	case BPF_JSGE:
		emit(ARM_CMP_R(rt, rn), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
		break;
	}
}

static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{

	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const s8 *r2 = bpf2a32[BPF_REG_2];
	const s8 *r3 = bpf2a32[BPF_REG_3];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *tcc = bpf2a32[TCALL_CNT];
	const s8 *tc;
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
	u32 lo, hi;
	s8 r_array, r_index;
	int off;

	/* if (index >= array->map.max_entries)
	 *	goto out;
	 */
	BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_array, map.max_entries);
	r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
	/* index is 32-bit for arrays */
	r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
	/* array->map.max_entries */
	emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
	/* index >= array->map.max_entries */
	emit(ARM_CMP_R(r_index, tmp[1]), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);

	/* tmp2[0] = array, tmp2[1] = index */

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 * tail_call_cnt++;
	 */
	lo = (u32)MAX_TAIL_CALL_CNT;
	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
	tc = arm_bpf_get_reg64(tcc, tmp, ctx);
	emit(ARM_CMP_I(tc[0], hi), ctx);
	_emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
	emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
	emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
	arm_bpf_put_reg64(tcc, tmp, ctx);

	/* prog = array->ptrs[index]
	 * if (prog == NULL)
	 *	goto out;
	 */
	BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
	off = imm8m(offsetof(struct bpf_array, ptrs));
	emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
	emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
	emit(ARM_CMP_I(tmp[1], 0), ctx);
	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_prog, bpf_func);
	emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
	emit_bx_r(tmp[1], ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		/* this sequence must be the same length on every pass */
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

/* 0xabcd => 0xcdab */
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
#else /* ARMv6+ */
	emit(ARM_REV16(rd, rn), ctx);
#endif
}

/* 0xabcdefgh => 0xghefcdab */
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);
	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
#else /* ARMv6+ */
	emit(ARM_REV(rd, rn), ctx);
#endif
}

// push the scratch stack register on top of the stack
static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
{
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rt;
	u16 reg_set = 0;

	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	reg_set = (1 << rt[1]) | (1 << rt[0]);
	emit(ARM_PUSH(reg_set), ctx);
}

/* Emit the JITed program's prologue: save callee-saved registers, set up
 * the frame and scratch area, zero the tail-call counter and move the
 * context argument into BPF_R1.
 */
static void build_prologue(struct jit_ctx *ctx)
{
	const s8 arm_r0 = bpf2a32[BPF_REG_0][1];
	const s8 *bpf_r1 = bpf2a32[BPF_REG_1];
	const s8 *bpf_fp = bpf2a32[BPF_REG_FP];
	const s8 *tcc = bpf2a32[TCALL_CNT];

	/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;

	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
	/* mov r3, #0 */
	/* sub r2, sp, #SCRATCH_SIZE */
	emit(ARM_MOV_I(bpf_r1[0], 0), ctx);
	emit(ARM_SUB_I(bpf_r1[1], ARM_SP, SCRATCH_SIZE), ctx);

	ctx->stack_size = imm8m(STACK_SIZE);

	/* Set up function call stack */
	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);

	/* Set up BPF prog stack base register */
	emit_a32_mov_r64(true, bpf_fp, bpf_r1, ctx);

	/* Initialize Tail Count */
	emit(ARM_MOV_I(bpf_r1[1], 0), ctx);
	emit_a32_mov_r64(true, tcc, bpf_r1, ctx);

	/* Move BPF_CTX to BPF_R1 */
	emit(ARM_MOV_R(bpf_r1[1], arm_r0), ctx);

	/* end of prologue */
}

/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
#ifdef CONFIG_FRAME_POINTER
	/* When using frame pointers, some additional registers need to
	 * be loaded. */
	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	/* Restore callee saved registers. */
	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
	emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}

/*
 * Convert an eBPF instruction to native instruction, i.e
 * JITs an eBPF instruction.
 * Returns :
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const s8 *dst = bpf2a32[insn->dst_reg];
	const s8 *src = bpf2a32[insn->src_reg];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const s8 *rd, *rs;
	s8 rd_lo, rt, rm, rn;
	s32 jmp_offset;

/* Reject immediates that don't fit in a signed 'bits'-wide field. */
#define check_imm(bits, imm) do {				\
	if ((imm) >= (1 << ((bits) - 1)) ||			\
	    (imm) < -(1 << ((bits) - 1))) {			\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm24(imm) check_imm(24, imm)

	switch (code) {
	/* ALU operations */

	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			if (imm == 1) {
				/* Special mov32 for zext */
				emit_a32_mov_i(dst_hi, 0, ctx);
				break;
			}
			emit_a32_mov_r64(is64, dst, src, ctx);
			break;
		case BPF_K:
			/* Sign-extend immediate value to destination reg */
			emit_a32_mov_se_i64(is64, dst, imm, ctx);
			break;
		}
		break;
	/* dst = dst + src/imm */
	/* dst = dst - src/imm */
	/* dst = dst | src/imm */
	/* dst = dst & src/imm */
	/* dst = dst ^ src/imm */
	/* dst = dst * src/imm */
	/* dst = dst << src */
	/* dst = dst >> src */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
			break;
		case BPF_K:
			/* Move immediate value to the temporary register
			 * and then do the ALU operation on the temporary
			 * register as this will sign-extend the immediate
			 * value into temporary reg and then it would be
			 * safe to do the operation on it.
			 */
			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
			emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
			break;
		}
		break;
	/* dst = dst / src(imm) */
	/* dst = dst % src(imm) */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_X:
		rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
		switch (BPF_SRC(code)) {
		case BPF_X:
			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
			break;
		case BPF_K:
			rt = tmp2[0];
			emit_a32_mov_i(rt, imm, ctx);
			break;
		default:
			rt = src_lo;
			break;
		}
		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		/* 64-bit divide/modulo is not implemented on this JIT */
		goto notyet;
	/* dst = dst << imm */
	/* dst = dst >> imm */
	/* dst = dst >> imm (signed) */
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU | BPF_ARSH | BPF_K:
		if (unlikely(imm > 31))
			return -EINVAL;
		if (imm)
			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	/* dst = dst << imm */
	case BPF_ALU64 | BPF_LSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_lsh_i64(dst, imm, ctx);
		break;
	/* dst = dst >> imm */
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_rsh_i64(dst, imm, ctx);
		break;
	/* dst = dst << src */
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_a32_lsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> src */
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_a32_rsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_a32_arsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> imm (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_arsh_i64(dst, imm, ctx);
		break;
	/* dst = ~dst */
	case BPF_ALU | BPF_NEG:
		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	/* dst = ~dst (64 bit) */
	case BPF_ALU64 | BPF_NEG:
		emit_a32_neg64(dst, ctx);
		break;
	/* dst = dst * src/imm */
	case BPF_ALU64 | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_mul_r64(dst, src, ctx);
			break;
		case BPF_K:
			/* Move immediate value to the temporary register
			 * and then do the multiplication on it as this
			 * will sign-extend the immediate value into temp
			 * reg then it would be safe to do the operation
			 * on it.
			 */
			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
			emit_a32_mul_r64(dst, tmp2, ctx);
			break;
		}
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		rd = arm_bpf_get_reg64(dst, tmp, ctx);
		if (BPF_SRC(code) == BPF_FROM_LE)
			/* ARM here is little-endian: FROM_LE only truncates */
			goto emit_bswap_uxt;
		switch (imm) {
		case 16:
			emit_rev16(rd[1], rd[1], ctx);
			goto emit_bswap_uxt;
		case 32:
			emit_rev32(rd[1], rd[1], ctx);
			goto emit_bswap_uxt;
		case 64:
			emit_rev32(ARM_LR, rd[1], ctx);
			emit_rev32(rd[1], rd[0], ctx);
			emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
			break;
		}
		goto exit;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
#if __LINUX_ARM_ARCH__ < 6
			emit_a32_mov_i(tmp2[1], 0xffff, ctx);
			emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
#else /* ARMv6+ */
			emit(ARM_UXTH(rd[1], rd[1]), ctx);
#endif
			if (!ctx->prog->aux->verifier_zext)
				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			if (!ctx->prog->aux->verifier_zext)
				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
exit:
		arm_bpf_put_reg64(dst, rd, ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		u64 val = (u32)imm | (u64)insn[1].imm << 32;

		emit_a32_mov_i64(dst, val, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
		break;
	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_DW:
			/* Sign-extend immediate value into temp reg */
			emit_a32_mov_se_i64(true, tmp2, imm, ctx);
			break;
		case BPF_W:
		case BPF_H:
		case BPF_B:
			emit_a32_mov_i(tmp2[1], imm, ctx);
			break;
		}
		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
		break;
	/* Atomic ops */
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		goto notyet;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		rs = arm_bpf_get_reg64(src, tmp2, ctx);
		emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
		break;
	/* PC += off if dst == src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst != src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	/* PC += off if dst & src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		/* Setup source registers */
		rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
		goto go_jmp;
	/* PC += off if dst == imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst != imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		rm = tmp2[0];
		rn = tmp2[1];
		/* Sign-extend immediate value */
		emit_a32_mov_se_i64(true, tmp2, imm, ctx);
go_jmp:
		/* Setup destination register */
		rd = arm_bpf_get_reg64(dst, tmp, ctx);

		/* Check for the condition */
		emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
			  BPF_CLASS(code) == BPF_JMP);

		/* Setup JUMP instruction */
		jmp_offset = bpf2a32_offset(i+off, i, ctx);
		switch (BPF_OP(code)) {
		case BPF_JNE:
		case BPF_JSET:
			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JEQ:
			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGT:
			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGE:
			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
			break;
		/* JSGT/JSLE use LT/GE because emit_ar_r() swapped operands */
		case BPF_JSGT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLE:
			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLT:
			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		}
		break;
	/* JMP OFF */
	case BPF_JMP | BPF_JA:
	{
		if (off == 0)
			break;
		jmp_offset = bpf2a32_offset(i+off, i, ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const s8 *r0 = bpf2a32[BPF_REG_0];
		const s8 *r1 = bpf2a32[BPF_REG_1];
		const s8 *r2 = bpf2a32[BPF_REG_2];
		const s8 *r3 = bpf2a32[BPF_REG_3];
		const s8 *r4 = bpf2a32[BPF_REG_4];
		const s8 *r5 = bpf2a32[BPF_REG_5];
		const u32 func = (u32)__bpf_call_base + (u32)imm;

		/* args 1-2 go in r0-r3, args 3-5 are pushed on the stack */
		emit_a32_mov_r64(true, r0, r1, ctx);
		emit_a32_mov_r64(true, r1, r2, ctx);
		emit_push_r64(r5, ctx);
		emit_push_r64(r4, ctx);
		emit_push_r64(r3, ctx);

		emit_a32_mov_i(tmp[1], func, ctx);
		emit_blx_r(tmp[1], ctx);

		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT
		 * simply fallthrough to epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	if (ctx->flags & FLAG_IMM_OVERFLOW)
		/*
		 * this instruction generated an overflow when
		 * trying to access the literal pool, so
		 * delegate this filter to the kernel interpreter.
		 */
		return -1;
	return 0;
}

/* JIT every instruction of the program body via build_insn(). */
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	unsigned int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &(prog->insnsi[i]);
		int ret;

		ret = build_insn(insn, ctx);

		/* It's used with loading the 64 bit immediate value.
*/ if (ret > 0) { i++; if (ctx->target == NULL) ctx->offsets[i] = ctx->idx; continue; } if (ctx->target == NULL) ctx->offsets[i] = ctx->idx; /* If unsuccesful, return with error code */ if (ret) return ret; } return 0; } static int validate_code(struct jit_ctx *ctx) { int i; for (i = 0; i < ctx->idx; i++) { if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF)) return -1; } return 0; } bool bpf_jit_needs_zext(void) { return true; } struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_prog *tmp, *orig_prog = prog; struct bpf_binary_header *header; bool tmp_blinded = false; struct jit_ctx ctx; unsigned int tmp_idx; unsigned int image_size; u8 *image_ptr; /* If BPF JIT was not enabled then we must fall back to * the interpreter. */ if (!prog->jit_requested) return orig_prog; /* If constant blinding was enabled and we failed during blinding * then we must fall back to the interpreter. Otherwise, we save * the new JITed code. */ tmp = bpf_jit_blind_constants(prog); if (IS_ERR(tmp)) return orig_prog; if (tmp != prog) { tmp_blinded = true; prog = tmp; } memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; ctx.cpu_architecture = cpu_architecture(); /* Not able to allocate memory for offsets[] , then * we must fall back to the interpreter */ ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL); if (ctx.offsets == NULL) { prog = orig_prog; goto out; } /* 1) fake pass to find in the length of the JITed code, * to compute ctx->offsets and other context variables * needed to compute final JITed code. * Also, calculate random starting pointer/start of JITed code * which is prefixed by random number of fault instructions. * * If the first pass fails then there is no chance of it * being successful in the second pass, so just fall back * to the interpreter. 
*/ if (build_body(&ctx)) { prog = orig_prog; goto out_off; } tmp_idx = ctx.idx; build_prologue(&ctx); ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; ctx.epilogue_offset = ctx.idx; #if __LINUX_ARM_ARCH__ < 7 tmp_idx = ctx.idx; build_epilogue(&ctx); ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4; ctx.idx += ctx.imm_count; if (ctx.imm_count) { ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL); if (ctx.imms == NULL) { prog = orig_prog; goto out_off; } } #else /* there's nothing about the epilogue on ARMv7 */ build_epilogue(&ctx); #endif /* Now we can get the actual image size of the JITed arm code. * Currently, we are not considering the THUMB-2 instructions * for jit, although it can decrease the size of the image. * * As each arm instruction is of length 32bit, we are translating * number of JITed instructions into the size required to store these * JITed code. */ image_size = sizeof(u32) * ctx.idx; /* Now we know the size of the structure to make */ header = bpf_jit_binary_alloc(image_size, &image_ptr, sizeof(u32), jit_fill_hole); /* Not able to allocate memory for the structure then * we must fall back to the interpretation */ if (header == NULL) { prog = orig_prog; goto out_imms; } /* 2.) Actual pass to generate final JIT code */ ctx.target = (u32 *) image_ptr; ctx.idx = 0; build_prologue(&ctx); /* If building the body of the JITed code fails somehow, * we fall back to the interpretation. */ if (build_body(&ctx) < 0) { image_ptr = NULL; bpf_jit_binary_free(header); prog = orig_prog; goto out_imms; } build_epilogue(&ctx); /* 3.) 
Extra pass to validate JITed Code */ if (validate_code(&ctx)) { image_ptr = NULL; bpf_jit_binary_free(header); prog = orig_prog; goto out_imms; } flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); if (bpf_jit_enable > 1) /* there are 2 passes here */ bpf_jit_dump(prog->len, image_size, 2, ctx.target); bpf_jit_binary_lock_ro(header); prog->bpf_func = (void *)ctx.target; prog->jited = 1; prog->jited_len = image_size; out_imms: #if __LINUX_ARM_ARCH__ < 7 if (ctx.imm_count) kfree(ctx.imms); #endif out_off: kfree(ctx.offsets); out: if (tmp_blinded) bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); return prog; }
linux-master
arch/arm/net/bpf_jit_32.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright STMicroelectronics, 2007.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach-types.h>

/*
 * These are the only hard-coded address offsets we still have to use.
 */
#define NOMADIK_FSMC_BASE	0x10100000	/* FSMC registers */
#define NOMADIK_SDRAMC_BASE	0x10110000	/* SDRAM Controller */
#define NOMADIK_CLCDC_BASE	0x10120000	/* CLCD Controller */
#define NOMADIK_MDIF_BASE	0x10120000	/* MDIF */
#define NOMADIK_DMA0_BASE	0x10130000	/* DMA0 Controller */
#define NOMADIK_IC_BASE		0x10140000	/* Vectored Irq Controller */
#define NOMADIK_DMA1_BASE	0x10150000	/* DMA1 Controller */
#define NOMADIK_USB_BASE	0x10170000	/* USB-OTG conf reg base */
#define NOMADIK_CRYP_BASE	0x10180000	/* Crypto processor */
#define NOMADIK_SHA1_BASE	0x10190000	/* SHA-1 Processor */
#define NOMADIK_XTI_BASE	0x101A0000	/* XTI */
#define NOMADIK_RNG_BASE	0x101B0000	/* Random number generator */
#define NOMADIK_SRC_BASE	0x101E0000	/* SRC base */
#define NOMADIK_WDOG_BASE	0x101E1000	/* Watchdog */
#define NOMADIK_MTU0_BASE	0x101E2000	/* Multiple Timer 0 */
#define NOMADIK_MTU1_BASE	0x101E3000	/* Multiple Timer 1 */
#define NOMADIK_GPIO0_BASE	0x101E4000	/* GPIO0 */
#define NOMADIK_GPIO1_BASE	0x101E5000	/* GPIO1 */
#define NOMADIK_GPIO2_BASE	0x101E6000	/* GPIO2 */
#define NOMADIK_GPIO3_BASE	0x101E7000	/* GPIO3 */
#define NOMADIK_RTC_BASE	0x101E8000	/* Real Time Clock base */
#define NOMADIK_PMU_BASE	0x101E9000	/* Power Management Unit */
#define NOMADIK_OWM_BASE	0x101EA000	/* One wire master */
#define NOMADIK_SCR_BASE	0x101EF000	/* Secure Control registers */
#define NOMADIK_MSP2_BASE	0x101F0000	/* MSP 2 interface */
#define NOMADIK_MSP1_BASE	0x101F1000	/* MSP 1 interface */
#define NOMADIK_UART2_BASE	0x101F2000	/* UART 2 interface */
#define NOMADIK_SSIRx_BASE	0x101F3000	/* SSI 8-ch rx interface */
#define NOMADIK_SSITx_BASE	0x101F4000	/* SSI 8-ch tx interface */
#define NOMADIK_MSHC_BASE	0x101F5000	/* Memory Stick(Pro) Host */
#define NOMADIK_SDI_BASE	0x101F6000	/* SD-card/MM-Card */
#define NOMADIK_I2C1_BASE	0x101F7000	/* I2C1 interface */
#define NOMADIK_I2C0_BASE	0x101F8000	/* I2C0 interface */
#define NOMADIK_MSP0_BASE	0x101F9000	/* MSP 0 interface */
#define NOMADIK_FIRDA_BASE	0x101FA000	/* FIrDA interface */
#define NOMADIK_UART1_BASE	0x101FB000	/* UART 1 interface */
#define NOMADIK_SSP_BASE	0x101FC000	/* SSP interface */
#define NOMADIK_UART0_BASE	0x101FD000	/* UART 0 interface */
#define NOMADIK_SGA_BASE	0x101FE000	/* SGA interface */
#define NOMADIK_L2CC_BASE	0x10210000	/* L2 Cache controller */
#define NOMADIK_UART1_VBASE	0xF01FB000

/* This is needed for LL-debug/earlyprintk/debug-macro.S */
static struct map_desc cpu8815_io_desc[] __initdata = {
	{
		/* Static mapping so the low-level debug UART works
		 * before ioremap() is available.
		 */
		.virtual =	NOMADIK_UART1_VBASE,
		.pfn =		__phys_to_pfn(NOMADIK_UART1_BASE),
		.length =	SZ_4K,
		.type =		MT_DEVICE,
	},
};

/* Install the early static I/O mapping (UART1 only). */
static void __init cpu8815_map_io(void)
{
	iotable_init(cpu8815_io_desc, ARRAY_SIZE(cpu8815_io_desc));
}

/* Reset the SoC by poking the SRC reset status register. */
static void cpu8815_restart(enum reboot_mode mode, const char *cmd)
{
	void __iomem *srcbase = ioremap(NOMADIK_SRC_BASE, SZ_4K);

	/* FIXME: use egpio when implemented */

	/* Write anything to Reset status register */
	writel(1, srcbase + 0x18);
}

static const char * cpu8815_board_compat[] = {
	"st,nomadik-nhk-15",
	"calaosystems,usb-s8815",
	NULL,
};

DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815")
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.map_io		= cpu8815_map_io,
	.restart	= cpu8815_restart,
	.dt_compat      = cpu8815_board_compat,
MACHINE_END
linux-master
arch/arm/mach-nomadik/cpu-8815.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains common code that is intended to be used across
 * boards so that it's not replicated.
 *
 *  Copyright (C) 2011 Xilinx
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk/zynq.h>
#include <linux/clocksource.h>
#include <linux/of_address.h>
#include <linux/of_clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/pgtable.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
#include <asm/smp_scu.h>
#include <asm/system_info.h>
#include <asm/hardware/cache-l2x0.h>

#include "common.h"

#define ZYNQ_DEVCFG_MCTRL		0x80
#define ZYNQ_DEVCFG_PS_VERSION_SHIFT	28
#define ZYNQ_DEVCFG_PS_VERSION_MASK	0xF

/* SCU base, filled in by zynq_scu_map_io(); used by the SMP code. */
void __iomem *zynq_scu_base;

/**
 * zynq_memory_init - Initialize special memory
 *
 * We need to stop things allocating the low memory as DMA can't work in
 * the 1st 512K of memory.
 */
static void __init zynq_memory_init(void)
{
	/* Only applies when PAGE_OFFSET maps physical address 0. */
	if (!__pa(PAGE_OFFSET))
		memblock_reserve(__pa(PAGE_OFFSET), 0x80000);
}

static struct platform_device zynq_cpuidle_device = {
	.name = "cpuidle-zynq",
};

/**
 * zynq_get_revision - Get Zynq silicon revision
 *
 * Return: Silicon version or -1 otherwise
 */
static int __init zynq_get_revision(void)
{
	struct device_node *np;
	void __iomem *zynq_devcfg_base;
	u32 revision;

	np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-devcfg-1.0");
	if (!np) {
		pr_err("%s: no devcfg node found\n", __func__);
		return -1;
	}

	zynq_devcfg_base = of_iomap(np, 0);
	of_node_put(np);
	if (!zynq_devcfg_base) {
		pr_err("%s: Unable to map I/O memory\n", __func__);
		return -1;
	}

	/* PS version lives in the top nibble of the MCTRL register. */
	revision = readl(zynq_devcfg_base + ZYNQ_DEVCFG_MCTRL);
	revision >>= ZYNQ_DEVCFG_PS_VERSION_SHIFT;
	revision &= ZYNQ_DEVCFG_PS_VERSION_MASK;

	iounmap(zynq_devcfg_base);

	return revision;
}

/* Late init: hook up PM infrastructure once core services exist. */
static void __init zynq_init_late(void)
{
	zynq_core_pm_init();
	zynq_pm_late_init();
}

/**
 * zynq_init_machine - System specific initialization, intended to be
 *		       called from board specific initialization.
 */
static void __init zynq_init_machine(void)
{
	struct soc_device_attribute *soc_dev_attr;
	struct soc_device *soc_dev;
	struct device *parent = NULL;

	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
	if (!soc_dev_attr)
		goto out;

	system_rev = zynq_get_revision();

	soc_dev_attr->family = kasprintf(GFP_KERNEL, "Xilinx Zynq");
	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "0x%x", system_rev);
	soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x",
					 zynq_slcr_get_device_id());

	soc_dev = soc_device_register(soc_dev_attr);
	if (IS_ERR(soc_dev)) {
		/* Registration failed: release everything we allocated. */
		kfree(soc_dev_attr->family);
		kfree(soc_dev_attr->revision);
		kfree(soc_dev_attr->soc_id);
		kfree(soc_dev_attr);
		goto out;
	}

	parent = soc_device_to_device(soc_dev);

out:
	/*
	 * Finished with the static registrations now; fill in the missing
	 * devices
	 */
	of_platform_default_populate(NULL, NULL, parent);

	platform_device_register(&zynq_cpuidle_device);
}

/* Clock and clocksource bring-up for the platform timer. */
static void __init zynq_timer_init(void)
{
	zynq_clock_init();
	of_clk_init(NULL);
	timer_probe();
}

static struct map_desc zynq_cortex_a9_scu_map __initdata = {
	.length	= SZ_256,
	.type	= MT_DEVICE,
};

/* Statically map the Cortex-A9 SCU so it is usable before ioremap. */
static void __init zynq_scu_map_io(void)
{
	unsigned long base;

	base = scu_a9_get_base();
	zynq_cortex_a9_scu_map.pfn = __phys_to_pfn(base);
	/* Expected address is in vmalloc area that's why simple assign here */
	zynq_cortex_a9_scu_map.virtual = base;
	iotable_init(&zynq_cortex_a9_scu_map, 1);
	zynq_scu_base = (void __iomem *)base;
	BUG_ON(!zynq_scu_base);
}

/**
 * zynq_map_io - Create memory mappings needed for early I/O.
 */
static void __init zynq_map_io(void)
{
	debug_ll_io_init();
	zynq_scu_map_io();
}

/* SLCR must be up before the GIC so SMP/reset plumbing works. */
static void __init zynq_irq_init(void)
{
	zynq_early_slcr_init();
	irqchip_init();
}

static const char * const zynq_dt_match[] = {
	"xlnx,zynq-7000",
	NULL
};

DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
	/* 64KB way size, 8-way associativity, parity disabled */
	.l2c_aux_val    = 0x00400000,
	.l2c_aux_mask	= 0xffbfffff,
	.smp		= smp_ops(zynq_smp_ops),
	.map_io		= zynq_map_io,
	.init_irq	= zynq_irq_init,
	.init_machine	= zynq_init_machine,
	.init_late	= zynq_init_late,
	.init_time	= zynq_timer_init,
	.dt_compat	= zynq_dt_match,
	.reserve	= zynq_memory_init,
MACHINE_END
linux-master
arch/arm/mach-zynq/common.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Xilinx SLCR driver * * Copyright (c) 2011-2013 Xilinx Inc. */ #include <linux/io.h> #include <linux/reboot.h> #include <linux/mfd/syscon.h> #include <linux/of_address.h> #include <linux/regmap.h> #include <linux/clk/zynq.h> #include "common.h" /* register offsets */ #define SLCR_UNLOCK_OFFSET 0x8 /* SCLR unlock register */ #define SLCR_PS_RST_CTRL_OFFSET 0x200 /* PS Software Reset Control */ #define SLCR_A9_CPU_RST_CTRL_OFFSET 0x244 /* CPU Software Reset Control */ #define SLCR_REBOOT_STATUS_OFFSET 0x258 /* PS Reboot Status */ #define SLCR_PSS_IDCODE 0x530 /* PS IDCODE */ #define SLCR_L2C_RAM 0xA1C /* L2C_RAM in AR#54190 */ #define SLCR_UNLOCK_MAGIC 0xDF0D #define SLCR_A9_CPU_CLKSTOP 0x10 #define SLCR_A9_CPU_RST 0x1 #define SLCR_PSS_IDCODE_DEVICE_SHIFT 12 #define SLCR_PSS_IDCODE_DEVICE_MASK 0x1F static void __iomem *zynq_slcr_base; static struct regmap *zynq_slcr_regmap; /** * zynq_slcr_write - Write to a register in SLCR block * * @val: Value to write to the register * @offset: Register offset in SLCR block * * Return: a negative value on error, 0 on success */ static int zynq_slcr_write(u32 val, u32 offset) { return regmap_write(zynq_slcr_regmap, offset, val); } /** * zynq_slcr_read - Read a register in SLCR block * * @val: Pointer to value to be read from SLCR * @offset: Register offset in SLCR block * * Return: a negative value on error, 0 on success */ static int zynq_slcr_read(u32 *val, u32 offset) { return regmap_read(zynq_slcr_regmap, offset, val); } /** * zynq_slcr_unlock - Unlock SLCR registers * * Return: a negative value on error, 0 on success */ static inline int zynq_slcr_unlock(void) { zynq_slcr_write(SLCR_UNLOCK_MAGIC, SLCR_UNLOCK_OFFSET); return 0; } /** * zynq_slcr_get_device_id - Read device code id * * Return: Device code id */ u32 zynq_slcr_get_device_id(void) { u32 val; zynq_slcr_read(&val, SLCR_PSS_IDCODE); val >>= SLCR_PSS_IDCODE_DEVICE_SHIFT; val &= SLCR_PSS_IDCODE_DEVICE_MASK; return val; } 
/**
 * zynq_slcr_system_restart - Restart the entire system.
 *
 * @nb:		Pointer to restart notifier block (unused)
 * @action:	Reboot mode (unused)
 * @data:	Restart handler private data (unused)
 *
 * Return:	0 always
 */
static int zynq_slcr_system_restart(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	u32 reboot;

	/*
	 * Clear 0x0F000000 bits of reboot status register to workaround
	 * the FSBL not loading the bitstream after soft-reboot
	 * This is a temporary solution until we know more.
	 */
	zynq_slcr_read(&reboot, SLCR_REBOOT_STATUS_OFFSET);
	zynq_slcr_write(reboot & 0xF0FFFFFF, SLCR_REBOOT_STATUS_OFFSET);
	zynq_slcr_write(1, SLCR_PS_RST_CTRL_OFFSET);
	return 0;
}

static struct notifier_block zynq_slcr_restart_nb = {
	.notifier_call	= zynq_slcr_system_restart,
	.priority	= 192,
};

/**
 * zynq_slcr_cpu_start - Start cpu
 * @cpu:	cpu number
 */
void zynq_slcr_cpu_start(int cpu)
{
	u32 reg;

	/* Deassert reset first, then release the clock stop. */
	zynq_slcr_read(&reg, SLCR_A9_CPU_RST_CTRL_OFFSET);
	reg &= ~(SLCR_A9_CPU_RST << cpu);
	zynq_slcr_write(reg, SLCR_A9_CPU_RST_CTRL_OFFSET);
	reg &= ~(SLCR_A9_CPU_CLKSTOP << cpu);
	zynq_slcr_write(reg, SLCR_A9_CPU_RST_CTRL_OFFSET);

	zynq_slcr_cpu_state_write(cpu, false);
}

/**
 * zynq_slcr_cpu_stop - Stop cpu
 * @cpu:	cpu number
 */
void zynq_slcr_cpu_stop(int cpu)
{
	u32 reg;

	/* Stop the clock and hold the CPU in reset in one write. */
	zynq_slcr_read(&reg, SLCR_A9_CPU_RST_CTRL_OFFSET);
	reg |= (SLCR_A9_CPU_CLKSTOP | SLCR_A9_CPU_RST) << cpu;
	zynq_slcr_write(reg, SLCR_A9_CPU_RST_CTRL_OFFSET);
}

/**
 * zynq_slcr_cpu_state_read - Read cpu state
 * @cpu:	cpu number
 *
 * SLCR_REBOOT_STATUS save upper 2 bits (31/30 cpu states for cpu0 and cpu1)
 * 0 means cpu is running, 1 cpu is going to die.
 *
 * Return: true if cpu is running, false if cpu is going to die
 */
bool zynq_slcr_cpu_state_read(int cpu)
{
	u32 state;

	state = readl(zynq_slcr_base + SLCR_REBOOT_STATUS_OFFSET);
	state &= 1 << (31 - cpu);

	return !state;
}

/**
 * zynq_slcr_cpu_state_write - Write cpu state
 * @cpu:	cpu number
 * @die:	cpu state - true if cpu is going to die
 *
 * SLCR_REBOOT_STATUS save upper 2 bits (31/30 cpu states for cpu0 and cpu1)
 * 0 means cpu is running, 1 cpu is going to die.
 */
void zynq_slcr_cpu_state_write(int cpu, bool die)
{
	u32 state, mask;

	state = readl(zynq_slcr_base + SLCR_REBOOT_STATUS_OFFSET);
	mask = 1 << (31 - cpu);
	if (die)
		state |= mask;
	else
		state &= ~mask;
	writel(state, zynq_slcr_base + SLCR_REBOOT_STATUS_OFFSET);
}

/**
 * zynq_early_slcr_init - Early slcr init function
 *
 * Return:	0 on success, negative errno otherwise.
 *
 * Called very early during boot from platform code to unlock SLCR.
 */
int __init zynq_early_slcr_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-slcr");
	if (!np) {
		pr_err("%s: no slcr node found\n", __func__);
		BUG();
	}

	zynq_slcr_base = of_iomap(np, 0);
	if (!zynq_slcr_base) {
		pr_err("%s: Unable to map I/O memory\n", __func__);
		BUG();
	}

	np->data = (__force void *)zynq_slcr_base;

	zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr");
	if (IS_ERR(zynq_slcr_regmap)) {
		pr_err("%s: failed to find zynq-slcr\n", __func__);
		of_node_put(np);
		return -ENODEV;
	}

	/* unlock the SLCR so that registers can be changed */
	zynq_slcr_unlock();

	/* See AR#54190 design advisory */
	regmap_update_bits(zynq_slcr_regmap, SLCR_L2C_RAM, 0x70707, 0x20202);

	register_restart_handler(&zynq_slcr_restart_nb);

	pr_info("%pOFn mapped to %p\n", np, zynq_slcr_base);

	of_node_put(np);

	return 0;
}
linux-master
arch/arm/mach-zynq/slcr.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Zynq power management * * Copyright (C) 2012 - 2014 Xilinx * * Sören Brinkmann <[email protected]> */ #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include "common.h" /* register offsets */ #define DDRC_CTRL_REG1_OFFS 0x60 #define DDRC_DRAM_PARAM_REG3_OFFS 0x20 /* bitfields */ #define DDRC_CLOCKSTOP_MASK BIT(23) #define DDRC_SELFREFRESH_MASK BIT(12) static void __iomem *ddrc_base; /** * zynq_pm_ioremap() - Create IO mappings * @comp: DT compatible string * Return: Pointer to the mapped memory or NULL. * * Remap the memory region for a compatible DT node. */ static void __iomem *zynq_pm_ioremap(const char *comp) { struct device_node *np; void __iomem *base = NULL; np = of_find_compatible_node(NULL, NULL, comp); if (np) { base = of_iomap(np, 0); of_node_put(np); } else { pr_warn("%s: no compatible node found for '%s'\n", __func__, comp); } return base; } /** * zynq_pm_late_init() - Power management init * * Initialization of power management related features and infrastructure. */ void __init zynq_pm_late_init(void) { u32 reg; ddrc_base = zynq_pm_ioremap("xlnx,zynq-ddrc-a05"); if (!ddrc_base) { pr_warn("%s: Unable to map DDRC IO memory.\n", __func__); } else { /* * Enable DDRC clock stop feature. The HW takes care of * entering/exiting the correct mode depending * on activity state. */ reg = readl(ddrc_base + DDRC_DRAM_PARAM_REG3_OFFS); reg |= DDRC_CLOCKSTOP_MASK; writel(reg, ddrc_base + DDRC_DRAM_PARAM_REG3_OFFS); } }
linux-master
arch/arm/mach-zynq/pm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains Xilinx specific SMP code, used to start up
 * the second processor.
 *
 * Copyright (C) 2011-2013 Xilinx
 *
 * based on linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"

/*
 * Store number of cores in the system
 * Because of scu_get_core_count() must be in __init section and can't
 * be called from zynq_cpun_start() because it is not in __init section.
 */
static int ncores;

/*
 * Kick secondary CPU @cpu so it starts executing at physical @address.
 * If @address is non-zero, a small trampoline is copied to physical 0x0
 * that jumps to @address; the CPU is then released from reset via SLCR.
 * Returns 0 on success, -1 on failure.
 */
int zynq_cpun_start(u32 address, int cpu)
{
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;
	u32 phy_cpuid = cpu_logical_map(cpu);

	/* MS: Expectation that SLCR are directly map and accessible */
	/* Not possible to jump to non aligned address */
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to ioremap area which points to address 0x0 */
		static u8 __iomem *zero;
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		zynq_slcr_cpu_stop(phy_cpuid);
		if (address) {
			if (__pa(PAGE_OFFSET)) {
				zero = ioremap(0, trampoline_code_size);
				if (!zero) {
					pr_warn("BOOTUP jump vectors not accessible\n");
					return -1;
				}
			} else {
				/* Physical 0 is already mapped at PAGE_OFFSET. */
				zero = (__force u8 __iomem *)PAGE_OFFSET;
			}

			/*
			 * This is elegant way how to jump to any address
			 * 0x0: Load address at 0x8 to r0
			 * 0x4: Jump by mov instruction
			 * 0x8: Jumping address
			 */
			memcpy_toio(zero, &zynq_secondary_trampoline,
							trampoline_size);
			writel(address, zero + trampoline_size);

			/* Make the trampoline visible to the waking CPU. */
			flush_cache_all();
			outer_flush_range(0, trampoline_code_size);
			smp_wmb();

			if (__pa(PAGE_OFFSET))
				iounmap(zero);
		}
		zynq_slcr_cpu_start(phy_cpuid);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

	return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);

/* SMP hook: boot @cpu at the generic ARM secondary entry point. */
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init zynq_smp_init_cpus(void)
{
	int i;

	ncores = scu_get_core_count(zynq_scu_base);

	for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
		set_cpu_possible(i, true);
}

/* Enable the SCU before any secondary CPU is brought up. */
static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(zynq_scu_base);
}

/**
 * zynq_secondary_init - Initialize secondary CPU cores
 * @cpu:	CPU that is initialized
 *
 * This function is in the hotplug path. Don't move it into the
 * init section!!
 */
static void zynq_secondary_init(unsigned int cpu)
{
	zynq_core_pm_init();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Wait (up to 50ms) for the dying CPU to flag itself dead via SLCR,
 * then gate its clock/reset. Returns 1 on success, 0 on timeout.
 */
static int zynq_cpu_kill(unsigned cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (zynq_slcr_cpu_state_read(cpu))
		if (time_after(jiffies, timeout))
			return 0;

	zynq_slcr_cpu_stop(cpu);
	return 1;
}

/**
 * zynq_cpu_die - Let a CPU core die
 * @cpu:	Dying CPU
 *
 * Platform-specific code to shutdown a CPU.
 * Called with IRQs disabled on the dying CPU.
 */
static void zynq_cpu_die(unsigned int cpu)
{
	zynq_slcr_cpu_state_write(cpu, true);

	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;)
		cpu_do_idle();
}
#endif

const struct smp_operations zynq_smp_ops __initconst = {
	.smp_init_cpus		= zynq_smp_init_cpus,
	.smp_prepare_cpus	= zynq_smp_prepare_cpus,
	.smp_boot_secondary	= zynq_boot_secondary,
	.smp_secondary_init	= zynq_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= zynq_cpu_die,
	.cpu_kill		= zynq_cpu_kill,
#endif
};
linux-master
arch/arm/mach-zynq/platsmp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/arm/mach-lpc32xx/common.c
 *
 * Author: Kevin Wells <[email protected]>
 *
 * Copyright (C) 2010 NXP Semiconductors
 */

#include <linux/init.h>
#include <linux/soc/nxp/lpc32xx-misc.h>

#include <asm/mach/map.h>
#include <asm/system_info.h>

#include "lpc32xx.h"
#include "common.h"

/*
 * Returns the unique ID for the device
 */
void lpc32xx_get_uid(u32 devid[4])
{
	int i;

	for (i = 0; i < 4; i++)
		devid[i] = __raw_readl(LPC32XX_CLKPWR_DEVID(i << 2));
}

/*
 * Detects and returns IRAM size for the device variation
 */
#define LPC32XX_IRAM_BANK_SIZE SZ_128K
/* Cached IRAM size; 0 until first probed by lpc32xx_return_iram(). */
static u32 iram_size;
u32 lpc32xx_return_iram(void __iomem **mapbase, dma_addr_t *dmaaddr)
{
	if (iram_size == 0) {
		u32 savedval1, savedval2;
		void __iomem *iramptr1, *iramptr2;

		iramptr1 = io_p2v(LPC32XX_IRAM_BASE);
		iramptr2 = io_p2v(LPC32XX_IRAM_BASE + LPC32XX_IRAM_BANK_SIZE);
		savedval1 = __raw_readl(iramptr1);
		savedval2 = __raw_readl(iramptr2);

		/* Probe for a second IRAM bank: write a changed value at
		 * bank-2 offset and see whether it aliases back to bank 1
		 * (single bank) or not (two banks). The original word is
		 * restored afterwards.
		 */
		if (savedval1 == savedval2) {
			__raw_writel(savedval2 + 1, iramptr2);
			if (__raw_readl(iramptr1) == savedval2 + 1)
				iram_size = LPC32XX_IRAM_BANK_SIZE;
			else
				iram_size = LPC32XX_IRAM_BANK_SIZE * 2;
			__raw_writel(savedval2, iramptr2);
		} else
			iram_size = LPC32XX_IRAM_BANK_SIZE * 2;
	}
	if (dmaaddr)
		*dmaaddr = LPC32XX_IRAM_BASE;
	if (mapbase)
		*mapbase = io_p2v(LPC32XX_IRAM_BASE);

	return iram_size;
}
EXPORT_SYMBOL_GPL(lpc32xx_return_iram);

/* Select MII or RMII pin routing for the Ethernet MAC. */
void lpc32xx_set_phy_interface_mode(phy_interface_t mode)
{
	u32 tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
	tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
	if (mode == PHY_INTERFACE_MODE_MII)
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
	else
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
	__raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL);
}
EXPORT_SYMBOL_GPL(lpc32xx_set_phy_interface_mode);

static struct map_desc lpc32xx_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long)IO_ADDRESS(LPC32XX_AHB0_START),
		.pfn		= __phys_to_pfn(LPC32XX_AHB0_START),
		.length		= LPC32XX_AHB0_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= (unsigned long)IO_ADDRESS(LPC32XX_AHB1_START),
		.pfn		= __phys_to_pfn(LPC32XX_AHB1_START),
		.length		= LPC32XX_AHB1_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= (unsigned long)IO_ADDRESS(LPC32XX_FABAPB_START),
		.pfn		= __phys_to_pfn(LPC32XX_FABAPB_START),
		.length		= LPC32XX_FABAPB_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= (unsigned long)IO_ADDRESS(LPC32XX_IRAM_BASE),
		.pfn		= __phys_to_pfn(LPC32XX_IRAM_BASE),
		.length		= (LPC32XX_IRAM_BANK_SIZE * 2),
		.type		= MT_DEVICE
	},
};

/* Install the static peripheral and IRAM mappings. */
void __init lpc32xx_map_io(void)
{
	iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc));
}

/* Print the chip UID and seed system_serial_{low,high} from it. */
static int __init lpc32xx_check_uid(void)
{
	u32 uid[4];

	lpc32xx_get_uid(uid);

	printk(KERN_INFO "LPC32XX unique ID: %08x%08x%08x%08x\n",
		uid[3], uid[2], uid[1], uid[0]);

	if (!system_serial_low && !system_serial_high) {
		system_serial_low = uid[0];
		system_serial_high = uid[1];
	}

	return 1;
}
arch_initcall(lpc32xx_check_uid);
linux-master
arch/arm/mach-lpc32xx/common.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-lpc32xx/serial.c * * Author: Kevin Wells <[email protected]> * * Copyright (C) 2010 NXP Semiconductors */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/serial_8250.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/soc/nxp/lpc32xx-misc.h> #include "lpc32xx.h" #include "common.h" #define LPC32XX_SUART_FIFO_SIZE 64 struct uartinit { char *uart_ck_name; u32 ck_mode_mask; void __iomem *pdiv_clk_reg; resource_size_t mapbase; }; static struct uartinit uartinit_data[] __initdata = { { .uart_ck_name = "uart5_ck", .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 5), .pdiv_clk_reg = LPC32XX_CLKPWR_UART5_CLK_CTRL, .mapbase = LPC32XX_UART5_BASE, }, { .uart_ck_name = "uart3_ck", .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 3), .pdiv_clk_reg = LPC32XX_CLKPWR_UART3_CLK_CTRL, .mapbase = LPC32XX_UART3_BASE, }, { .uart_ck_name = "uart4_ck", .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 4), .pdiv_clk_reg = LPC32XX_CLKPWR_UART4_CLK_CTRL, .mapbase = LPC32XX_UART4_BASE, }, { .uart_ck_name = "uart6_ck", .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 6), .pdiv_clk_reg = LPC32XX_CLKPWR_UART6_CLK_CTRL, .mapbase = LPC32XX_UART6_BASE, }, }; /* LPC3250 Errata HSUART.1: Hang workaround via loopback mode on inactivity */ void lpc32xx_loopback_set(resource_size_t mapbase, int state) { int bit; u32 tmp; switch (mapbase) { case LPC32XX_HS_UART1_BASE: bit = 0; break; case LPC32XX_HS_UART2_BASE: bit = 1; break; case LPC32XX_HS_UART7_BASE: bit = 6; break; default: WARN(1, "lpc32xx_hs: Warning: Unknown port at %08x\n", mapbase); return; } tmp = readl(LPC32XX_UARTCTL_CLOOP); if (state) tmp |= (1 << bit); else tmp &= ~(1 << bit); writel(tmp, LPC32XX_UARTCTL_CLOOP); } EXPORT_SYMBOL_GPL(lpc32xx_loopback_set); void __init 
lpc32xx_serial_init(void) { u32 tmp, clkmodes = 0; struct clk *clk; unsigned int puart; int i, j; for (i = 0; i < ARRAY_SIZE(uartinit_data); i++) { clk = clk_get(NULL, uartinit_data[i].uart_ck_name); if (!IS_ERR(clk)) { clk_enable(clk); } /* Setup UART clock modes for all UARTs, disable autoclock */ clkmodes |= uartinit_data[i].ck_mode_mask; /* pre-UART clock divider set to 1 */ __raw_writel(0x0101, uartinit_data[i].pdiv_clk_reg); /* * Force a flush of the RX FIFOs to work around a * HW bug */ puart = uartinit_data[i].mapbase; __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart)); __raw_writel(0x00, LPC32XX_UART_DLL_FIFO(puart)); j = LPC32XX_SUART_FIFO_SIZE; while (j--) tmp = __raw_readl( LPC32XX_UART_DLL_FIFO(puart)); __raw_writel(0, LPC32XX_UART_IIR_FCR(puart)); } /* This needs to be done after all UART clocks are setup */ __raw_writel(clkmodes, LPC32XX_UARTCTL_CLKMODE); for (i = 0; i < ARRAY_SIZE(uartinit_data); i++) { /* Force a flush of the RX FIFOs to work around a HW bug */ puart = uartinit_data[i].mapbase; __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart)); __raw_writel(0x00, LPC32XX_UART_DLL_FIFO(puart)); j = LPC32XX_SUART_FIFO_SIZE; while (j--) tmp = __raw_readl(LPC32XX_UART_DLL_FIFO(puart)); __raw_writel(0, LPC32XX_UART_IIR_FCR(puart)); } /* Disable IrDA pulsing support on UART6 */ tmp = __raw_readl(LPC32XX_UARTCTL_CTRL); tmp |= LPC32XX_UART_UART6_IRDAMOD_BYPASS; __raw_writel(tmp, LPC32XX_UARTCTL_CTRL); /* Disable UART5->USB transparent mode or USB won't work */ tmp = __raw_readl(LPC32XX_UARTCTL_CTRL); tmp &= ~LPC32XX_UART_U5_ROUTE_TO_USB; __raw_writel(tmp, LPC32XX_UARTCTL_CTRL); }
linux-master
arch/arm/mach-lpc32xx/serial.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-lpc32xx/pm.c
 *
 * Original authors: Vitaly Wool, Dmitry Chigirev <[email protected]>
 * Modified by Kevin Wells <[email protected]>
 *
 * 2005 (c) MontaVista Software, Inc.
 */

/*
 * LPC32XX CPU and system power management
 *
 * The LPC32XX has three CPU modes for controlling system power: run,
 * direct-run, and halt modes. When switching between halt and run modes,
 * the CPU transitions through direct-run mode. For Linux, direct-run
 * mode is not used in normal operation. Halt mode is used when the
 * system is fully suspended.
 *
 * Run mode:
 * The ARM CPU clock (HCLK_PLL), HCLK bus clock, and PCLK bus clocks are
 * derived from the HCLK PLL. The HCLK and PCLK bus rates are divided from
 * the HCLK_PLL rate. Linux runs in this mode.
 *
 * Direct-run mode:
 * The ARM CPU clock, HCLK bus clock, and PCLK bus clocks are driven from
 * SYSCLK. SYSCLK is usually around 13MHz, but may vary based on SYSCLK
 * source or the frequency of the main oscillator. In this mode, the
 * HCLK_PLL can be safely enabled, changed, or disabled.
 *
 * Halt mode:
 * SYSCLK is gated off and the CPU and system clocks are halted.
 * Peripherals based on the 32KHz oscillator clock (ie, RTC, touch,
 * key scanner, etc.) still operate if enabled. In this state, an enabled
 * system event (ie, GPIO state change, RTC match, key press, etc.) will
 * wake the system up back into direct-run mode.
 *
 * DRAM refresh
 * DRAM clocking and refresh are slightly different for systems with DDR
 * DRAM or regular SDRAM devices. If SDRAM is used in the system, the
 * SDRAM will still be accessible in direct-run mode. In DDR based systems,
 * a transition to direct-run mode will stop all DDR accesses (no clocks).
 * Because of this, the code to switch power modes and the code to enter
 * and exit DRAM self-refresh modes must not be executed in DRAM. A small
 * section of IRAM is used instead for this.
 *
 * Suspend is handled with the following logic:
 *  Backup a small area of IRAM used for the suspend code
 *  Copy suspend code to IRAM
 *  Transfer control to code in IRAM
 *  Places DRAMs in self-refresh mode
 *  Enter direct-run mode
 *  Save state of HCLK_PLL PLL
 *  Disable HCLK_PLL PLL
 *  Enter halt mode - CPU and buses will stop
 *  System enters direct-run mode when an enabled event occurs
 *  HCLK PLL state is restored
 *  Run mode is entered
 *  DRAMS are placed back into normal mode
 *  Code execution returns from IRAM
 *  IRAM code area used for suspend is restored
 *  Suspend mode is exited
 */

#include <linux/suspend.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>

#include "lpc32xx.h"
#include "common.h"

/* Virtual address of the IRAM region that temporarily hosts the suspend code */
#define TEMP_IRAM_AREA  IO_ADDRESS(LPC32XX_IRAM_BASE)

/*
 * Both STANDBY and MEM suspend states are handled the same with no
 * loss of CPU or memory state
 */
static int lpc32xx_pm_enter(suspend_state_t state)
{
	int (*lpc32xx_suspend_ptr) (void);
	void *iram_swap_area;

	/* Allocate some space for temporary IRAM storage */
	iram_swap_area = kmemdup((void *)TEMP_IRAM_AREA,
				 lpc32xx_sys_suspend_sz, GFP_KERNEL);
	if (!iram_swap_area)
		return -ENOMEM;

	/*
	 * Copy code to suspend system into IRAM. The suspend code
	 * needs to run from IRAM as DRAM may no longer be available
	 * when the PLL is stopped.
	 */
	memcpy((void *) TEMP_IRAM_AREA, &lpc32xx_sys_suspend,
		lpc32xx_sys_suspend_sz);
	/* Make the freshly copied code visible to the instruction stream */
	flush_icache_range((unsigned long)TEMP_IRAM_AREA,
		(unsigned long)(TEMP_IRAM_AREA) + lpc32xx_sys_suspend_sz);

	/* Transfer to suspend code in IRAM */
	lpc32xx_suspend_ptr = (void *) TEMP_IRAM_AREA;
	flush_cache_all();
	(void) lpc32xx_suspend_ptr();

	/* Restore original IRAM contents */
	memcpy((void *) TEMP_IRAM_AREA, iram_swap_area,
		lpc32xx_sys_suspend_sz);

	kfree(iram_swap_area);

	return 0;
}

static const struct platform_suspend_ops lpc32xx_pm_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= lpc32xx_pm_enter,
};

#define EMC_DYN_MEM_CTRL_OFS 0x20
#define EMC_SRMMC (1 << 3)
#define EMC_CTRL_REG io_p2v(LPC32XX_EMC_BASE + EMC_DYN_MEM_CTRL_OFS)

static int __init lpc32xx_pm_init(void)
{
	/*
	 * Setup SDRAM self-refresh clock to automatically disable on
	 * start of self-refresh. This only needs to be done once.
	 */
	__raw_writel(__raw_readl(EMC_CTRL_REG) | EMC_SRMMC, EMC_CTRL_REG);

	suspend_set_ops(&lpc32xx_pm_ops);

	return 0;
}
arch_initcall(lpc32xx_pm_init);
linux-master
arch/arm/mach-lpc32xx/pm.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Platform support for LPC32xx SoC
 *
 * Author: Kevin Wells <[email protected]>
 *
 * Copyright (C) 2012 Roland Stigge <[email protected]>
 * Copyright (C) 2010 NXP Semiconductors
 */

#include <linux/amba/pl08x.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/mtd/lpc32xx_slc.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include "common.h"

/* DMA request signals for the SLC and MLC NAND controllers on AHB1 */
static struct pl08x_channel_data pl08x_slave_channels[] = {
	{
		.bus_id = "nand-slc",
		.min_signal = 1, /* SLC NAND Flash */
		.max_signal = 1,
		.periph_buses = PL08X_AHB1,
	},
	{
		.bus_id = "nand-mlc",
		.min_signal = 12, /* MLC NAND Flash */
		.max_signal = 12,
		.periph_buses = PL08X_AHB1,
	},
};

/* Signals are fixed per channel, so allocation is a trivial lookup */
static int pl08x_get_signal(const struct pl08x_channel_data *cd)
{
	return cd->min_signal;
}

/* Nothing to release - signals are statically assigned */
static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch)
{
}

static struct pl08x_platform_data pl08x_pd = {
	/* Some reasonable memcpy defaults */
	.memcpy_burst_size = PL08X_BURST_SZ_256,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.slave_channels = &pl08x_slave_channels[0],
	.num_slave_channels = ARRAY_SIZE(pl08x_slave_channels),
	.get_xfer_signal = pl08x_get_signal,
	.put_xfer_signal = pl08x_put_signal,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
};

static struct lpc32xx_slc_platform_data lpc32xx_slc_data = {
	.dma_filter = pl08x_filter_id,
};

static struct lpc32xx_mlc_platform_data lpc32xx_mlc_data = {
	.dma_filter = pl08x_filter_id,
};

/* Attach legacy platform data to specific DT nodes during population */
static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
	OF_DEV_AUXDATA("arm,pl080", 0x31000000, "pl08xdmac", &pl08x_pd),
	OF_DEV_AUXDATA("nxp,lpc3220-slc", 0x20020000, "20020000.flash",
		       &lpc32xx_slc_data),
	OF_DEV_AUXDATA("nxp,lpc3220-mlc", 0x200a8000, "200a8000.flash",
		       &lpc32xx_mlc_data),
	{ }
};

static void __init lpc3250_machine_init(void)
{
	lpc32xx_serial_init();

	of_platform_default_populate(NULL, lpc32xx_auxdata_lookup, NULL);
}

static const char *const lpc32xx_dt_compat[] __initconst = {
	"nxp,lpc3220",
	"nxp,lpc3230",
	"nxp,lpc3240",
	"nxp,lpc3250",
	NULL
};

DT_MACHINE_START(LPC32XX_DT, "LPC32XX SoC (Flattened Device Tree)")
	.atag_offset	= 0x100,
	.map_io		= lpc32xx_map_io,
	.init_machine	= lpc3250_machine_init,
	.dt_compat	= lpc32xx_dt_compat,
MACHINE_END
linux-master
arch/arm/mach-lpc32xx/phy3250.c
// SPDX-License-Identifier: GPL-2.0 /* * Device Tree support for Marvell Berlin SoCs. * * Sebastian Hesselbarth <[email protected]> * * based on GPL'ed 2.6 kernel sources * (c) Marvell International Ltd. */ #include <asm/mach/arch.h> static const char * const berlin_dt_compat[] = { "marvell,berlin", NULL, }; DT_MACHINE_START(BERLIN_DT, "Marvell Berlin") .dt_compat = berlin_dt_compat, /* * with DT probing for L2CCs, berlin_init_machine can be removed. * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc */ .l2c_aux_val = 0x30c00000, .l2c_aux_mask = 0xfeffffff, MACHINE_END
linux-master
arch/arm/mach-berlin/berlin.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Marvell Technology Group Ltd.
 *
 * Antoine Ténart <[email protected]>
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/page.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>

/*
 * There are two reset registers, one with self-clearing (SC)
 * reset and one with non-self-clearing reset (NON_SC).
 */
#define CPU_RESET_SC		0x00
#define CPU_RESET_NON_SC	0x20

#define RESET_VECT		0x00
#define SW_RESET_ADDR		0x94

/* First instruction executed by a freshly reset secondary (see headsmp) */
extern u32 boot_inst;

static void __iomem *cpu_ctrl;

/*
 * Pulse the non-self-clearing reset bit for @cpu: clear to assert
 * reset, then set to release it.
 */
static inline void berlin_perform_reset_cpu(unsigned int cpu)
{
	u32 val;

	val = readl(cpu_ctrl + CPU_RESET_NON_SC);
	val &= ~BIT(cpu_logical_map(cpu));
	writel(val, cpu_ctrl + CPU_RESET_NON_SC);
	val |= BIT(cpu_logical_map(cpu));
	writel(val, cpu_ctrl + CPU_RESET_NON_SC);
}

static int berlin_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* cpu_ctrl is only set if smp_prepare_cpus() fully succeeded */
	if (!cpu_ctrl)
		return -EFAULT;

	/*
	 * Reset the CPU, making it to execute the instruction in the reset
	 * exception vector.
	 */
	berlin_perform_reset_cpu(cpu);

	return 0;
}

static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	void __iomem *scu_base;
	void __iomem *vectors_base;

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	scu_base = of_iomap(np, 0);
	of_node_put(np);
	if (!scu_base)
		return;

	np = of_find_compatible_node(NULL, NULL, "marvell,berlin-cpu-ctrl");
	cpu_ctrl = of_iomap(np, 0);
	of_node_put(np);
	if (!cpu_ctrl)
		goto unmap_scu;

	vectors_base = ioremap(VECTORS_BASE, SZ_32K);
	if (!vectors_base)
		goto unmap_scu;

	scu_enable(scu_base);

	/*
	 * Write the first instruction the CPU will execute after being reset
	 * in the reset exception vector.
	 */
	writel(boot_inst, vectors_base + RESET_VECT);

	/*
	 * Write the secondary startup address into the SW reset address
	 * vector. This is used by boot_inst.
	 */
	writel(__pa_symbol(secondary_startup), vectors_base + SW_RESET_ADDR);

	iounmap(vectors_base);
unmap_scu:
	iounmap(scu_base);
}

#ifdef CONFIG_HOTPLUG_CPU
static void berlin_cpu_die(unsigned int cpu)
{
	/* Drop out of coherency, then idle until killed by another CPU */
	v7_exit_coherency_flush(louis);
	while (1)
		cpu_do_idle();
}

static int berlin_cpu_kill(unsigned int cpu)
{
	u32 val;

	/* Hold the dying CPU in reset (assert, don't release) */
	val = readl(cpu_ctrl + CPU_RESET_NON_SC);
	val &= ~BIT(cpu_logical_map(cpu));
	writel(val, cpu_ctrl + CPU_RESET_NON_SC);

	return 1;
}
#endif

static const struct smp_operations berlin_smp_ops __initconst = {
	.smp_prepare_cpus	= berlin_smp_prepare_cpus,
	.smp_boot_secondary	= berlin_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= berlin_cpu_die,
	.cpu_kill		= berlin_cpu_kill,
#endif
};
CPU_METHOD_OF_DECLARE(berlin_smp, "marvell,berlin-smp", &berlin_smp_ops);
linux-master
arch/arm/mach-berlin/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Mentor Graphics Corporation.
 *
 * vdsomunge - Host program which produces a shared object
 * architecturally specified to be usable by both soft- and hard-float
 * programs.
 *
 * The Procedure Call Standard for the ARM Architecture (ARM IHI
 * 0042E) says:
 *
 *	6.4.1 VFP and Base Standard Compatibility
 *
 *	Code compiled for the VFP calling standard is compatible with
 *	the base standard (and vice-versa) if no floating-point or
 *	containerized vector arguments or results are used.
 *
 * And ELF for the ARM Architecture (ARM IHI 0044E) (Table 4-2) says:
 *
 *	If both EF_ARM_ABI_FLOAT_XXXX bits are clear, conformance to the
 *	base procedure-call standard is implied.
 *
 * The VDSO is built with -msoft-float, as with the rest of the ARM
 * kernel, and uses no floating point arguments or results.  The build
 * process will produce a shared object that may or may not have the
 * EF_ARM_ABI_FLOAT_SOFT flag set (it seems to depend on the binutils
 * version; binutils starting with 2.24 appears to set it).  The
 * EF_ARM_ABI_FLOAT_HARD flag should definitely not be set, and this
 * program will error out if it is.
 *
 * If the soft-float flag is set, this program clears it.  That's all
 * it does.
 */

#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#define swab16(x) \
	((((x) & 0x00ff) << 8) | \
	 (((x) & 0xff00) >> 8))

#define swab32(x) \
	((((x) & 0x000000ff) << 24) | \
	 (((x) & 0x0000ff00) <<  8) | \
	 (((x) & 0x00ff0000) >>  8) | \
	 (((x) & 0xff000000) >> 24))

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HOST_ORDER ELFDATA2LSB
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define HOST_ORDER ELFDATA2MSB
#endif

/* Some of the ELF constants we'd like to use were added to <elf.h>
 * relatively recently.
 */
#ifndef EF_ARM_EABI_VER5
#define EF_ARM_EABI_VER5 0x05000000
#endif

#ifndef EF_ARM_ABI_FLOAT_SOFT
#define EF_ARM_ABI_FLOAT_SOFT 0x200
#endif

#ifndef EF_ARM_ABI_FLOAT_HARD
#define EF_ARM_ABI_FLOAT_HARD 0x400
#endif

/* Set once an error has been reported; cleanup() uses it to remove outfile */
static int failed;
static const char *argv0;
static const char *outfile;

/* Print a formatted error prefixed with the program name, then exit(1) */
static void fail(const char *fmt, ...)
{
	va_list ap;

	failed = 1;
	fprintf(stderr, "%s: ", argv0);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	exit(EXIT_FAILURE);
}

/* atexit hook: don't leave a partially written output file behind */
static void cleanup(void)
{
	if (failed && outfile != NULL)
		unlink(outfile);
}

/* Read a 32-bit ELF word, byte-swapping if the file's order != host order */
static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
{
	return swap ? swab32(word) : word;
}

/* Read a 16-bit ELF half-word, byte-swapping if necessary */
static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
{
	return swap ? swab16(half) : half;
}

/* Store a 32-bit ELF word in the file's byte order */
static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
{
	*dst = swap ? swab32(val) : val;
}

int main(int argc, char **argv)
{
	const Elf32_Ehdr *inhdr;
	bool clear_soft_float;
	const char *infile;
	Elf32_Word e_flags;
	const void *inbuf;
	struct stat stat;
	void *outbuf;
	bool swap;
	int outfd;
	int infd;

	atexit(cleanup);

	argv0 = argv[0];

	if (argc != 3)
		fail("Usage: %s [infile] [outfile]\n", argv[0]);

	infile = argv[1];
	outfile = argv[2];

	infd = open(infile, O_RDONLY);
	if (infd < 0)
		fail("Cannot open %s: %s\n", infile, strerror(errno));

	if (fstat(infd, &stat) != 0)
		fail("Failed stat for %s: %s\n", infile, strerror(errno));

	inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
	if (inbuf == MAP_FAILED)
		fail("Failed to map %s: %s\n", infile, strerror(errno));

	close(infd);

	inhdr = inbuf;

	if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
		fail("Not an ELF file\n");

	if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
		fail("Unsupported ELF class\n");

	swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;

	if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
		fail("Not a shared object\n");

	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
		/*
		 * Fix: report the host-order value; the raw field would
		 * print byte-swapped garbage when the input's endianness
		 * differs from the host's.
		 */
		fail("Unsupported architecture %#x\n",
		     read_elf_half(inhdr->e_machine, swap));

	e_flags = read_elf_word(inhdr->e_flags, swap);

	if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
		fail("Unsupported EABI version %#x\n",
		     EF_ARM_EABI_VERSION(e_flags));
	}

	if (e_flags & EF_ARM_ABI_FLOAT_HARD)
		fail("Unexpected hard-float flag set in e_flags\n");

	clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);

	outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
	if (outfd < 0)
		fail("Cannot open %s: %s\n", outfile, strerror(errno));

	if (ftruncate(outfd, stat.st_size) != 0)
		fail("Cannot truncate %s: %s\n", outfile, strerror(errno));

	outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		      outfd, 0);
	if (outbuf == MAP_FAILED)
		fail("Failed to map %s: %s\n", outfile, strerror(errno));

	close(outfd);

	memcpy(outbuf, inbuf, stat.st_size);

	if (clear_soft_float) {
		Elf32_Ehdr *outhdr;

		outhdr = outbuf;
		e_flags &= ~EF_ARM_ABI_FLOAT_SOFT;
		write_elf_word(e_flags, &outhdr->e_flags, swap);
	}

	if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
		fail("Failed to sync %s: %s\n", outfile, strerror(errno));

	return EXIT_SUCCESS;
}
linux-master
arch/arm/vdso/vdsomunge.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2012-2018 ARM Limited * * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. * Here we can supply some information useful to userland. */ #include <linux/uts.h> #include <linux/version.h> #include <linux/elfnote.h> #include <linux/build-salt.h> ELFNOTE32("Linux", 0, LINUX_VERSION_CODE); BUILD_SALT;
linux-master
arch/arm/vdso/note.c
// SPDX-License-Identifier: GPL-2.0-only /* * ARM userspace implementations of gettimeofday() and similar. * * Copyright 2015 Mentor Graphics Corporation. */ #include <linux/time.h> #include <linux/types.h> #include <asm/vdso.h> #include <asm/unwind.h> int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) { return __cvdso_clock_gettime32(clock, ts); } int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) { return __cvdso_clock_gettime(clock, ts); } int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } int __vdso_clock_getres(clockid_t clock_id, struct old_timespec32 *res) { return __cvdso_clock_getres_time32(clock_id, res); } /* Avoid unresolved references emitted by GCC */ void __aeabi_unwind_cpp_pr0(void) { } void __aeabi_unwind_cpp_pr1(void) { } void __aeabi_unwind_cpp_pr2(void) { }
linux-master
arch/arm/vdso/vgettimeofday.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 STMicroelectronics (R&D) Limited. * Author(s): Srinivas Kandagatla <[email protected]> */ #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include "smp.h" static const char *const stih41x_dt_match[] __initconst = { "st,stih415", "st,stih416", "st,stih407", "st,stih410", "st,stih418", NULL }; DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree") .dt_compat = stih41x_dt_match, .l2c_aux_val = L2C_AUX_CTRL_SHARED_OVERRIDE | L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH | L2C_AUX_CTRL_WAY_SIZE(4), .l2c_aux_mask = 0xc0000fff, .smp = smp_ops(sti_smp_ops), MACHINE_END
linux-master
arch/arm/mach-sti/board-dt.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-sti/platsmp.c * * Copyright (C) 2013 STMicroelectronics (R&D) Limited. * http://www.st.com * * Cloned from linux/arch/arm/mach-vexpress/platsmp.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved */ #include <linux/init.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/smp.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/memblock.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> #include "smp.h" static u32 __iomem *cpu_strt_ptr; static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned long entry_pa = __pa_symbol(secondary_startup); /* * Secondary CPU is initialised and started by a U-BOOTROM firmware. * Secondary CPU is spinning and waiting for a write at cpu_strt_ptr. * Writing secondary_startup address at cpu_strt_ptr makes it to * jump directly to secondary_startup(). */ __raw_writel(entry_pa, cpu_strt_ptr); /* wmb so that data is actually written before cache flush is done */ smp_wmb(); sync_cache_w(cpu_strt_ptr); return 0; } static void __init sti_smp_prepare_cpus(unsigned int max_cpus) { struct device_node *np; void __iomem *scu_base; u32 release_phys; int cpu; np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); if (np) { scu_base = of_iomap(np, 0); scu_enable(scu_base); of_node_put(np); } if (max_cpus <= 1) return; for_each_possible_cpu(cpu) { np = of_get_cpu_node(cpu, NULL); if (!np) continue; if (of_property_read_u32(np, "cpu-release-addr", &release_phys)) { pr_err("CPU %d: missing or invalid cpu-release-addr " "property\n", cpu); continue; } /* * cpu-release-addr is usually configured in SBC DMEM but can * also be in RAM. 
*/ if (!memblock_is_memory(release_phys)) cpu_strt_ptr = ioremap(release_phys, sizeof(release_phys)); else cpu_strt_ptr = (u32 __iomem *)phys_to_virt(release_phys); set_cpu_possible(cpu, true); } } const struct smp_operations sti_smp_ops __initconst = { .smp_prepare_cpus = sti_smp_prepare_cpus, .smp_boot_secondary = sti_boot_secondary, };
linux-master
arch/arm/mach-sti/platsmp.c
// SPDX-License-Identifier: GPL-2.0 #define _LINUX_STRING_H_ #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ #include <linux/linkage.h> #include <asm/string.h> #include "misc.h" #define STATIC static #define STATIC_RW_DATA /* non-static please */ /* Diagnostic functions */ #ifdef DEBUG # define Assert(cond,msg) {if(!(cond)) error(msg);} # define Trace(x) fprintf x # define Tracev(x) {if (verbose) fprintf x ;} # define Tracevv(x) {if (verbose>1) fprintf x ;} # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} #else # define Assert(cond,msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c,x) # define Tracecv(c,x) #endif /* Not needed, but used in some headers pulled in by decompressors */ extern char * strstr(const char * s1, const char *s2); extern size_t strlen(const char *s); extern int strcmp(const char *cs, const char *ct); extern int memcmp(const void *cs, const void *ct, size_t count); extern char * strchrnul(const char *, int); #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif #ifdef CONFIG_KERNEL_LZO #include "../../../../lib/decompress_unlzo.c" #endif #ifdef CONFIG_KERNEL_LZMA #include "../../../../lib/decompress_unlzma.c" #endif #ifdef CONFIG_KERNEL_XZ /* Prevent KASAN override of string helpers in decompressor */ #undef memmove #define memmove memmove #undef memcpy #define memcpy memcpy #include "../../../../lib/decompress_unxz.c" #endif #ifdef CONFIG_KERNEL_LZ4 #include "../../../../lib/decompress_unlz4.c" #endif int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) { return __decompress(input, len, NULL, NULL, output, 0, NULL, error); }
linux-master
arch/arm/boot/compressed/decompress.c
// SPDX-License-Identifier: GPL-2.0-only #include "../../../../lib/fdt.c"
linux-master
arch/arm/boot/compressed/fdt.c
// SPDX-License-Identifier: GPL-2.0-only #include "../../../../lib/fonts/font_acorn_8x8.c"
linux-master
arch/arm/boot/compressed/font.c
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm/boot/compressed/string.c
 *
 * Small subset of simple string routines
 */

#define __NO_FORTIFY
#include <linux/string.h>

/*
 * The decompressor is built without KASan but uses the same redirects as the
 * rest of the kernel when CONFIG_KASAN is enabled, defining e.g. memcpy()
 * to __memcpy() but since we are not linking with the main kernel string
 * library in the decompressor, that will lead to link failures.
 *
 * Undefine KASan's versions, define the wrapped functions and alias them to
 * the right names so that when e.g. __memcpy() appear in the code, it will
 * still be linked to this local version of memcpy().
 */
#ifdef CONFIG_KASAN
#undef memcpy
#undef memmove
#undef memset
void *__memcpy(void *__dest, __const void *__src, size_t __n) __alias(memcpy);
void *__memmove(void *__dest, __const void *__src, size_t count) __alias(memmove);
void *__memset(void *s, int c, size_t count) __alias(memset);
#endif

/*
 * Byte-wise copy, manually unrolled 8-at-a-time for speed in the
 * decompressor (no libc available here).  Regions must not overlap.
 */
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
	int i = 0;
	unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;

	for (i = __n >> 3; i > 0; i--) {
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
	}

	if (__n & 1 << 2) {
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
	}

	if (__n & 1 << 1) {
		*d++ = *s++;
		*d++ = *s++;
	}

	if (__n & 1)
		*d++ = *s++;

	return __dest;
}

/* Overlap-safe copy: copies backwards when dest is above src */
void *memmove(void *__dest, __const void *__src, size_t count)
{
	unsigned char *d = __dest;
	const unsigned char *s = __src;

	if (__dest == __src)
		return __dest;

	if (__dest < __src)
		return memcpy(__dest, __src, count);

	while (count--)
		d[count] = s[count];
	return __dest;
}

size_t strlen(const char *s)
{
	const char *sc = s;

	while (*sc != '\0')
		sc++;
	return sc - s;
}

/* Like strlen() but never scans more than count bytes */
size_t strnlen(const char *s, size_t count)
{
	const char *sc;

	for (sc = s; count-- && *sc != '\0'; ++sc)
		/* nothing */;
	return sc - s;
}

int memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count;
	int res = 0;

	while (su1 < end) {
		res = *su1++ - *su2++;
		if (res)
			break;
	}
	return res;
}

int strcmp(const char *cs, const char *ct)
{
	unsigned char c1, c2;
	int res = 0;

	do {
		c1 = *cs++;
		c2 = *ct++;
		res = c1 - c2;
		if (res)
			break;
	} while (c1);
	return res;
}

/* Returns pointer to first occurrence of byte c, or NULL */
void *memchr(const void *s, int c, size_t count)
{
	const unsigned char *p = s;

	while (count--)
		if ((unsigned char)c == *p++)
			return (void *)(p - 1);
	return NULL;
}

/* Note: finds the terminating NUL too if c == '\0', per the C standard */
char *strchr(const char *s, int c)
{
	while (*s != (char)c)
		if (*s++ == '\0')
			return NULL;
	return (char *)s;
}

/* Returns pointer to the last occurrence of c, or NULL */
char *strrchr(const char *s, int c)
{
	const char *last = NULL;
	do {
		if (*s == (char)c)
			last = s;
	} while (*s++);
	return (char *)last;
}

#undef memset

void *memset(void *s, int c, size_t count)
{
	char *xs = s;
	while (count--)
		*xs++ = c;
	return s;
}
linux-master
arch/arm/boot/compressed/string.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/sizes.h>

#include "misc.h"

/*
 * Look up @property under @node_path, returning it only if it exists
 * and is at least @minlen bytes long; NULL otherwise.
 */
static const void *get_prop(const void *fdt, const char *node_path,
			    const char *property, int minlen)
{
	const void *prop;
	int offset, len;

	offset = fdt_path_offset(fdt, node_path);
	if (offset < 0)
		return NULL;

	prop = fdt_getprop(fdt, offset, property, &len);
	if (!prop || len < minlen)
		return NULL;

	return prop;
}

/* Read #address-cells / #size-cells from the root node (default 1) */
static uint32_t get_cells(const void *fdt, const char *name)
{
	const fdt32_t *prop = get_prop(fdt, "/", name, sizeof(fdt32_t));

	if (!prop) {
		/* default */
		return 1;
	}

	return fdt32_ld(prop);
}

/* Combine one or two big-endian cells into a 64-bit value */
static uint64_t get_val(const fdt32_t *cells, uint32_t ncells)
{
	uint64_t r;

	r = fdt32_ld(cells);
	if (ncells > 1)
		r = (r << 32) | fdt32_ld(cells + 1);

	return r;
}

/*
 * Check the start of physical memory
 *
 * Traditionally, the start address of physical memory is obtained by masking
 * the program counter.  However, this does require that this address is a
 * multiple of 128 MiB, precluding booting Linux on platforms where this
 * requirement is not fulfilled.
 * Hence validate the calculated address against the memory information in the
 * DTB, and, if out-of-range, replace it by the real start address.
 * To preserve backwards compatibility (systems reserving a block of memory
 * at the start of physical memory, kdump, ...), the traditional method is
 * used if it yields a valid address, unless the "linux,usable-memory-range"
 * property is present.
 *
 * Return value: start address of physical memory to use
 */
uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt)
{
	uint32_t addr_cells, size_cells, usable_base, base;
	uint32_t fdt_mem_start = 0xffffffff;
	const fdt32_t *usable, *reg, *endp;
	uint64_t size, usable_end, end;
	const char *type;
	int offset, len;

	if (!fdt)
		return mem_start;

	if (fdt_magic(fdt) != FDT_MAGIC)
		return mem_start;

	/* There may be multiple cells on LPAE platforms */
	addr_cells = get_cells(fdt, "#address-cells");
	size_cells = get_cells(fdt, "#size-cells");
	if (addr_cells > 2 || size_cells > 2)
		return mem_start;

	/*
	 * Usable memory in case of a crash dump kernel
	 * This property describes a limitation: memory within this range is
	 * only valid when also described through another mechanism
	 */
	usable = get_prop(fdt, "/chosen", "linux,usable-memory-range",
			  (addr_cells + size_cells) * sizeof(fdt32_t));
	if (usable) {
		size = get_val(usable + addr_cells, size_cells);
		if (!size)
			return mem_start;

		if (addr_cells > 1 && fdt32_ld(usable)) {
			/* Outside 32-bit address space */
			return mem_start;
		}

		usable_base = fdt32_ld(usable + addr_cells - 1);
		usable_end = usable_base + size;
	}

	/* Walk all memory nodes and regions */
	for (offset = fdt_next_node(fdt, -1, NULL); offset >= 0;
	     offset = fdt_next_node(fdt, offset, NULL)) {
		type = fdt_getprop(fdt, offset, "device_type", NULL);
		if (!type || strcmp(type, "memory"))
			continue;

		/* "linux,usable-memory" overrides "reg" when present */
		reg = fdt_getprop(fdt, offset, "linux,usable-memory", &len);
		if (!reg)
			reg = fdt_getprop(fdt, offset, "reg", &len);
		if (!reg)
			continue;

		for (endp = reg + (len / sizeof(fdt32_t));
		     endp - reg >= addr_cells + size_cells;
		     reg += addr_cells + size_cells) {
			size = get_val(reg + addr_cells, size_cells);
			if (!size)
				continue;

			if (addr_cells > 1 && fdt32_ld(reg)) {
				/* Outside 32-bit address space, skipping */
				continue;
			}

			base = fdt32_ld(reg + addr_cells - 1);
			end = base + size;
			if (usable) {
				/*
				 * Clip to usable range, which takes precedence
				 * over mem_start
				 */
				if (base < usable_base)
					base = usable_base;

				if (end > usable_end)
					end = usable_end;

				if (end <= base)
					continue;
			} else if (mem_start >= base && mem_start < end) {
				/* Calculated address is valid, use it */
				return mem_start;
			}

			if (base < fdt_mem_start)
				fdt_mem_start = base;
		}
	}

	if (fdt_mem_start == 0xffffffff) {
		/* No usable memory found, falling back to default */
		return mem_start;
	}

	/*
	 * The calculated address is not usable, or was overridden by the
	 * "linux,usable-memory-range" property.
	 * Use the lowest usable physical memory address from the DTB instead,
	 * and make sure this is a multiple of 2 MiB for phys/virt patching.
	 */
	return round_up(fdt_mem_start, SZ_2M);
}
linux-master
arch/arm/boot/compressed/fdt_check_mem_start.c
// SPDX-License-Identifier: GPL-2.0-only #include "../../../../lib/fdt_wip.c"
linux-master
arch/arm/boot/compressed/fdt_wip.c
// SPDX-License-Identifier: GPL-2.0-only #include "../../../../lib/fdt_rw.c"
linux-master
arch/arm/boot/compressed/fdt_rw.c
// SPDX-License-Identifier: GPL-2.0
/*
 * misc.c
 *
 * This is a collection of several routines from gzip-1.0.3
 * adapted for Linux.
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 *
 * Modified for ARM Linux by Russell King
 *
 * Nicolas Pitre <[email protected]> 1999/04/14 :
 * For this code to run directly from Flash, all constant variables must
 * be marked with 'const' and all other variables initialized at run-time
 * only. This way all non constant variables will end up in the bss segment,
 * which should point to addresses in RAM and cleared to 0 on start.
 * This allows for a much quicker boot time.
 */

/* Machine ID passed from the boot loader; consumed by the kernel proper */
unsigned int __machine_arch_type;

#include <linux/compiler.h>	/* for inline */
#include <linux/types.h>
#include <linux/linkage.h>
#include "misc.h"
#ifdef CONFIG_ARCH_EP93XX
#include "misc-ep93xx.h"
#endif

static void putstr(const char *ptr);

/* Pulls in the board/SoC-specific putc()/flush() console glue */
#include CONFIG_UNCOMPRESS_INCLUDE

#ifdef CONFIG_DEBUG_ICEDCC

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)

/*
 * Emit one character over the CP14 debug comms channel (v6/v7 layout).
 * Spins (bounded) until the channel is free; silently drops the
 * character on timeout so a missing debugger cannot hang the boot.
 */
static void icedcc_putc(int ch)
{
	int status, i = 0x4000000;

	do {
		if (--i < 0)
			return;

		asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (status));
	} while (status & (1 << 29));

	asm("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch));
}

#elif defined(CONFIG_CPU_XSCALE)

/* Same as above, but using the XScale CP14 register layout */
static void icedcc_putc(int ch)
{
	int status, i = 0x4000000;

	do {
		if (--i < 0)
			return;

		asm volatile ("mrc p14, 0, %0, c14, c0, 0" : "=r" (status));
	} while (status & (1 << 28));

	asm("mcr p14, 0, %0, c8, c0, 0" : : "r" (ch));
}

#else

/* Fallback CP14 layout for other (pre-v6) debug architectures */
static void icedcc_putc(int ch)
{
	int status, i = 0x4000000;

	do {
		if (--i < 0)
			return;

		asm volatile ("mrc p14, 0, %0, c0, c0, 0" : "=r" (status));
	} while (status & 2);

	asm("mcr p14, 0, %0, c1, c0, 0" : : "r" (ch));
}

#endif

#define putc(ch)	icedcc_putc(ch)
#endif

/* Write a NUL-terminated string to the console, expanding \n to \r\n */
static void putstr(const char *ptr)
{
	char c;

	while ((c = *ptr++) != '\0') {
		if (c == '\n')
			putc('\r');
		putc(c);
	}

	flush();
}

/*
 * gzip declarations
 */
unsigned char *output_data;		/* where the decompressed kernel goes */
unsigned long free_mem_ptr;		/* scratch heap for the decompressor */
unsigned long free_mem_end_ptr;

#ifndef arch_error
#define arch_error(x)
#endif

/* Report a fatal error and halt; never returns */
void error(char *x)
{
	arch_error(x);

	putstr("\n\n");
	putstr(x);
	putstr("\n\n -- System halted");

	while(1);	/* Halt */
}

/* Called by the compiler-generated division stubs on divide-by-zero */
asmlinkage void __div0(void)
{
	error("Attempting division by 0!");
}

/*
 * Decompress the kernel image to output_start, using the
 * [free_mem_ptr_p, free_mem_ptr_end_p) range as scratch memory.
 * arch_id is the machine type handed over from the boot loader.
 */
void
decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
		unsigned long free_mem_ptr_end_p,
		int arch_id)
{
	int ret;

	output_data		= (unsigned char *)output_start;
	free_mem_ptr		= free_mem_ptr_p;
	free_mem_end_ptr	= free_mem_ptr_end_p;
	__machine_arch_type	= arch_id;

#ifdef CONFIG_ARCH_EP93XX
	ep93xx_decomp_setup();
#endif
	arch_decomp_setup();

	putstr("Uncompressing Linux...");
	/* input_data/input_data_end are provided by the linked-in payload */
	ret = do_decompress(input_data, input_data_end - input_data,
			    output_data, error);
	if (ret)
		error("decompressor returned an error");
	else
		putstr(" done, booting the kernel.\n");
}

/* FORTIFY_SOURCE hook: fatal on detected buffer overflow */
void fortify_panic(const char *name)
{
	error("detected buffer overflow");
}
linux-master
arch/arm/boot/compressed/misc.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/libfdt_env.h>
#include <asm/setup.h>
#include <libfdt.h>
#include "misc.h"

#if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND)
#define do_extend_cmdline	1
#else
#define do_extend_cmdline	0
#endif

/* Maximum number of ATAG_MEM banks we fold into the FDT /memory node */
#define NR_BANKS 16

/* Look up a node by path, creating it under the root if missing */
static int node_offset(void *fdt, const char *node_path)
{
	int offset = fdt_path_offset(fdt, node_path);
	if (offset == -FDT_ERR_NOTFOUND)
		/* Add the node to root if not found, dropping the leading '/' */
		offset = fdt_add_subnode(fdt, 0, node_path + 1);
	return offset;
}

/* Set a raw property on node_path, creating the node if needed */
static int setprop(void *fdt, const char *node_path, const char *property,
		   void *val_array, int size)
{
	int offset = node_offset(fdt, node_path);
	if (offset < 0)
		return offset;
	return fdt_setprop(fdt, offset, property, val_array, size);
}

/* Set a string property on node_path, creating the node if needed */
static int setprop_string(void *fdt, const char *node_path,
			  const char *property, const char *string)
{
	int offset = node_offset(fdt, node_path);
	if (offset < 0)
		return offset;
	return fdt_setprop_string(fdt, offset, property, string);
}

/* Set a single 32-bit cell property, creating the node if needed */
static int setprop_cell(void *fdt, const char *node_path,
			const char *property, uint32_t val)
{
	int offset = node_offset(fdt, node_path);
	if (offset < 0)
		return offset;
	return fdt_setprop_cell(fdt, offset, property, val);
}

/* Read a property; returns NULL if the node does not exist */
static const void *getprop(const void *fdt, const char *node_path,
			   const char *property, int *len)
{
	int offset = fdt_path_offset(fdt, node_path);

	if (offset == -FDT_ERR_NOTFOUND)
		return NULL;

	return fdt_getprop(fdt, offset, property, len);
}

/* Root #size-cells value; defaults to 1 when the property is absent */
static uint32_t get_cell_size(const void *fdt)
{
	int len;
	uint32_t cell_size = 1;
	const __be32 *size_len =  getprop(fdt, "/", "#size-cells", &len);

	if (size_len)
		cell_size = fdt32_to_cpu(*size_len);
	return cell_size;
}

/*
 * Append the ATAG command line to the bootargs already present in the
 * FDT /chosen node, separated by a single space, and write the result
 * back. Either part may be absent; the combined string is truncated to
 * COMMAND_LINE_SIZE.
 */
static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
{
	char cmdline[COMMAND_LINE_SIZE];
	const char *fdt_bootargs;
	char *ptr = cmdline;
	int len = 0;

	/* copy the fdt command line into the buffer */
	fdt_bootargs = getprop(fdt, "/chosen", "bootargs", &len);
	if (fdt_bootargs)
		if (len < COMMAND_LINE_SIZE) {
			memcpy(ptr, fdt_bootargs, len);
			/* len is the length of the string
			 * including the NULL terminator */
			ptr += len - 1;
		}

	/* and append the ATAG_CMDLINE */
	if (fdt_cmdline) {
		len = strlen(fdt_cmdline);
		if (ptr - cmdline + len + 2 < COMMAND_LINE_SIZE) {
			*ptr++ = ' ';
			memcpy(ptr, fdt_cmdline, len);
			ptr += len;
		}
	}
	*ptr = '\0';

	setprop_string(fdt, "/chosen", "bootargs", cmdline);
}

/* Render value as 8 uppercase hex digits plus NUL into out[9] */
static void hex_str(char *out, uint32_t value)
{
	uint32_t digit;
	int idx;

	for (idx = 7; idx >= 0; idx--) {
		digit = value >> 28;
		value <<= 4;
		digit &= 0xf;
		if (digit < 10)
			digit += '0';
		else
			digit += 'A'-10;
		*out++ = digit;
	}
	*out = '\0';
}

/*
 * Convert and fold provided ATAGs into the provided FDT.
 *
 * Return values:
 *    = 0 -> pretend success
 *    = 1 -> bad ATAG (may retry with another possible ATAG pointer)
 *    < 0 -> error from libfdt
 */
int atags_to_fdt(void *atag_list, void *fdt, int total_space)
{
	struct tag *atag = atag_list;
	/* In the case of 64 bits memory size, need to reserve 2 cells for
	 * address and size for each bank */
	__be32 mem_reg_property[2 * 2 * NR_BANKS];
	int memcount = 0;
	int ret, memsize;

	/* make sure we've got an aligned pointer */
	if ((u32)atag_list & 0x3)
		return 1;

	/* if we get a DTB here we're done already */
	if (*(__be32 *)atag_list == cpu_to_fdt32(FDT_MAGIC))
		return 0;

	/* validate the ATAG */
	if (atag->hdr.tag != ATAG_CORE ||
	    (atag->hdr.size != tag_size(tag_core) &&
	     atag->hdr.size != 2))
		return 1;

	/* let's give it all the room it could need */
	ret = fdt_open_into(fdt, fdt, total_space);
	if (ret < 0)
		return ret;

	for_each_tag(atag, atag_list) {
		if (atag->hdr.tag == ATAG_CMDLINE) {
			/* Append the ATAGS command line to the device tree
			 * command line.
			 * NB: This means that if the same parameter is set in
			 * the device tree and in the tags, the one from the
			 * tags will be chosen.
			 */
			if (do_extend_cmdline)
				merge_fdt_bootargs(fdt,
						   atag->u.cmdline.cmdline);
			else
				setprop_string(fdt, "/chosen", "bootargs",
					       atag->u.cmdline.cmdline);
		} else if (atag->hdr.tag == ATAG_MEM) {
			/* Bank table full: drop any further memory tags */
			if (memcount >= sizeof(mem_reg_property)/4)
				continue;
			if (!atag->u.mem.size)
				continue;
			memsize = get_cell_size(fdt);

			if (memsize == 2) {
				/* if memsize is 2, that means that
				 * each data needs 2 cells of 32 bits,
				 * so the data are 64 bits */
				__be64 *mem_reg_prop64 =
					(__be64 *)mem_reg_property;
				mem_reg_prop64[memcount++] =
					cpu_to_fdt64(atag->u.mem.start);
				mem_reg_prop64[memcount++] =
					cpu_to_fdt64(atag->u.mem.size);
			} else {
				mem_reg_property[memcount++] =
					cpu_to_fdt32(atag->u.mem.start);
				mem_reg_property[memcount++] =
					cpu_to_fdt32(atag->u.mem.size);
			}

		} else if (atag->hdr.tag == ATAG_INITRD2) {
			uint32_t initrd_start, initrd_size;
			initrd_start = atag->u.initrd.start;
			initrd_size = atag->u.initrd.size;
			setprop_cell(fdt, "/chosen", "linux,initrd-start",
					initrd_start);
			setprop_cell(fdt, "/chosen", "linux,initrd-end",
					initrd_start + initrd_size);
		} else if (atag->hdr.tag == ATAG_SERIAL) {
			char serno[16+2];
			hex_str(serno, atag->u.serialnr.high);
			hex_str(serno+8, atag->u.serialnr.low);
			setprop_string(fdt, "/", "serial-number", serno);
		}
	}

	if (memcount) {
		/* memsize was set by the last processed ATAG_MEM above */
		setprop(fdt, "/memory", "reg", mem_reg_property,
			4 * memcount * memsize);
	}

	return fdt_pack(fdt);
}
linux-master
arch/arm/boot/compressed/atags_to_fdt.c
// SPDX-License-Identifier: GPL-2.0-only #include "../../../../lib/fdt_ro.c"
linux-master
arch/arm/boot/compressed/fdt_ro.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/arm/mach-footbridge/netwinder-hw.c * * Netwinder machine fixup * * Copyright (C) 1998, 1999 Russell King, Phil Blundell */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/leds.h> #include <asm/hardware/dec21285.h> #include <asm/mach-types.h> #include <asm/setup.h> #include <asm/system_misc.h> #include <asm/mach/arch.h> #include "common.h" #define IRDA_IO_BASE 0x180 #define GP1_IO_BASE 0x338 #define GP2_IO_BASE 0x33a /* * Winbond WB83977F accessibility stuff */ static inline void wb977_open(void) { outb(0x87, 0x370); outb(0x87, 0x370); } static inline void wb977_close(void) { outb(0xaa, 0x370); } static inline void wb977_wb(int reg, int val) { outb(reg, 0x370); outb(val, 0x371); } static inline void wb977_ww(int reg, int val) { outb(reg, 0x370); outb(val >> 8, 0x371); outb(reg + 1, 0x370); outb(val & 255, 0x371); } #define wb977_device_select(dev) wb977_wb(0x07, dev) #define wb977_device_disable() wb977_wb(0x30, 0x00) #define wb977_device_enable() wb977_wb(0x30, 0x01) /* * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE */ DEFINE_RAW_SPINLOCK(nw_gpio_lock); EXPORT_SYMBOL(nw_gpio_lock); static unsigned int current_gpio_op; static unsigned int current_gpio_io; static unsigned int current_cpld; void nw_gpio_modify_op(unsigned int mask, unsigned int set) { unsigned int new_gpio, changed; new_gpio = (current_gpio_op & ~mask) | set; changed = new_gpio ^ current_gpio_op; current_gpio_op = new_gpio; if (changed & 0xff) outb(new_gpio, GP1_IO_BASE); if (changed & 0xff00) outb(new_gpio >> 8, GP2_IO_BASE); } EXPORT_SYMBOL(nw_gpio_modify_op); static inline void __gpio_modify_io(int mask, int in) { unsigned int new_gpio, changed; int port; new_gpio = (current_gpio_io & ~mask) | in; changed = new_gpio ^ current_gpio_io; current_gpio_io = new_gpio; 
changed >>= 1; new_gpio >>= 1; wb977_device_select(7); for (port = 0xe1; changed && port < 0xe8; changed >>= 1) { wb977_wb(port, new_gpio & 1); port += 1; new_gpio >>= 1; } wb977_device_select(8); for (port = 0xe8; changed && port < 0xec; changed >>= 1) { wb977_wb(port, new_gpio & 1); port += 1; new_gpio >>= 1; } } void nw_gpio_modify_io(unsigned int mask, unsigned int in) { /* Open up the SuperIO chip */ wb977_open(); __gpio_modify_io(mask, in); /* Close up the EFER gate */ wb977_close(); } EXPORT_SYMBOL(nw_gpio_modify_io); unsigned int nw_gpio_read(void) { return inb(GP1_IO_BASE) | inb(GP2_IO_BASE) << 8; } EXPORT_SYMBOL(nw_gpio_read); /* * Initialise the Winbond W83977F global registers */ static inline void wb977_init_global(void) { /* * Enable R/W config registers */ wb977_wb(0x26, 0x40); /* * Power down FDC (not used) */ wb977_wb(0x22, 0xfe); /* * GP12, GP11, CIRRX, IRRXH, GP10 */ wb977_wb(0x2a, 0xc1); /* * GP23, GP22, GP21, GP20, GP13 */ wb977_wb(0x2b, 0x6b); /* * GP17, GP16, GP15, GP14 */ wb977_wb(0x2c, 0x55); } /* * Initialise the Winbond W83977F printer port */ static inline void wb977_init_printer(void) { wb977_device_select(1); /* * mode 1 == EPP */ wb977_wb(0xf0, 0x01); } /* * Initialise the Winbond W83977F keyboard controller */ static inline void wb977_init_keyboard(void) { wb977_device_select(5); /* * Keyboard controller address */ wb977_ww(0x60, 0x0060); wb977_ww(0x62, 0x0064); /* * Keyboard IRQ 1, active high, edge trigger */ wb977_wb(0x70, 1); wb977_wb(0x71, 0x02); /* * Mouse IRQ 5, active high, edge trigger */ wb977_wb(0x72, 5); wb977_wb(0x73, 0x02); /* * KBC 8MHz */ wb977_wb(0xf0, 0x40); /* * Enable device */ wb977_device_enable(); } /* * Initialise the Winbond W83977F Infra-Red device */ static inline void wb977_init_irda(void) { wb977_device_select(6); /* * IR base address */ wb977_ww(0x60, IRDA_IO_BASE); /* * IRDA IRQ 6, active high, edge trigger */ wb977_wb(0x70, 6); wb977_wb(0x71, 0x02); /* * RX DMA - ISA DMA 0 */ wb977_wb(0x74, 0x00); /* * 
TX DMA - Disable Tx DMA */ wb977_wb(0x75, 0x04); /* * Append CRC, Enable bank selection */ wb977_wb(0xf0, 0x03); /* * Enable device */ wb977_device_enable(); } /* * Initialise Winbond W83977F general purpose IO */ static inline void wb977_init_gpio(void) { unsigned long flags; /* * Set up initial I/O definitions */ current_gpio_io = -1; __gpio_modify_io(-1, GPIO_DONE | GPIO_WDTIMER); wb977_device_select(7); /* * Group1 base address */ wb977_ww(0x60, GP1_IO_BASE); wb977_ww(0x62, 0); wb977_ww(0x64, 0); /* * GP10 (Orage button) IRQ 10, active high, edge trigger */ wb977_wb(0x70, 10); wb977_wb(0x71, 0x02); /* * GP10: Debounce filter enabled, IRQ, input */ wb977_wb(0xe0, 0x19); /* * Enable Group1 */ wb977_device_enable(); wb977_device_select(8); /* * Group2 base address */ wb977_ww(0x60, GP2_IO_BASE); /* * Clear watchdog timer regs * - timer disable */ wb977_wb(0xf2, 0x00); /* * - disable LED, no mouse nor keyboard IRQ */ wb977_wb(0xf3, 0x00); /* * - timer counting, disable power LED, disable timeouot */ wb977_wb(0xf4, 0x00); /* * Enable group2 */ wb977_device_enable(); /* * Set Group1/Group2 outputs */ raw_spin_lock_irqsave(&nw_gpio_lock, flags); nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN); raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); } /* * Initialise the Winbond W83977F chip. */ static void __init wb977_init(void) { request_region(0x370, 2, "W83977AF configuration"); /* * Open up the SuperIO chip */ wb977_open(); /* * Initialise the global registers */ wb977_init_global(); /* * Initialise the various devices in * the multi-IO chip. 
*/ wb977_init_printer(); wb977_init_keyboard(); wb977_init_irda(); wb977_init_gpio(); /* * Close up the EFER gate */ wb977_close(); } void nw_cpld_modify(unsigned int mask, unsigned int set) { int msk; current_cpld = (current_cpld & ~mask) | set; nw_gpio_modify_io(GPIO_DATA | GPIO_IOCLK | GPIO_IOLOAD, 0); nw_gpio_modify_op(GPIO_IOLOAD, 0); for (msk = 8; msk; msk >>= 1) { int bit = current_cpld & msk; nw_gpio_modify_op(GPIO_DATA | GPIO_IOCLK, bit ? GPIO_DATA : 0); nw_gpio_modify_op(GPIO_IOCLK, GPIO_IOCLK); } nw_gpio_modify_op(GPIO_IOCLK|GPIO_DATA, 0); nw_gpio_modify_op(GPIO_IOLOAD|GPIO_DSCLK, GPIO_IOLOAD|GPIO_DSCLK); nw_gpio_modify_op(GPIO_IOLOAD, 0); } EXPORT_SYMBOL(nw_cpld_modify); static void __init cpld_init(void) { unsigned long flags; raw_spin_lock_irqsave(&nw_gpio_lock, flags); nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE); raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); } static unsigned char rwa_unlock[] __initdata = { 0x00, 0x00, 0x6a, 0xb5, 0xda, 0xed, 0xf6, 0xfb, 0x7d, 0xbe, 0xdf, 0x6f, 0x37, 0x1b, 0x0d, 0x86, 0xc3, 0x61, 0xb0, 0x58, 0x2c, 0x16, 0x8b, 0x45, 0xa2, 0xd1, 0xe8, 0x74, 0x3a, 0x9d, 0xce, 0xe7, 0x73, 0x39 }; #ifndef DEBUG #define dprintk(x...) #else #define dprintk(x...) printk(x) #endif #define WRITE_RWA(r,v) do { outb((r), 0x279); udelay(10); outb((v), 0xa79); } while (0) static inline void rwa010_unlock(void) { int i; WRITE_RWA(2, 2); mdelay(10); for (i = 0; i < sizeof(rwa_unlock); i++) { outb(rwa_unlock[i], 0x279); udelay(10); } } static inline void rwa010_read_ident(void) { unsigned char si[9]; int i, j; WRITE_RWA(3, 0); WRITE_RWA(0, 128); outb(1, 0x279); mdelay(1); dprintk("Identifier: "); for (i = 0; i < 9; i++) { si[i] = 0; for (j = 0; j < 8; j++) { int bit; udelay(250); inb(0x203); udelay(250); bit = inb(0x203); dprintk("%02X ", bit); bit = (bit == 0xaa) ? 
1 : 0; si[i] |= bit << j; } dprintk("(%02X) ", si[i]); } dprintk("\n"); } static inline void rwa010_global_init(void) { WRITE_RWA(6, 2); // Assign a card no = 2 dprintk("Card no = %d\n", inb(0x203)); /* disable the modem section of the chip */ WRITE_RWA(7, 3); WRITE_RWA(0x30, 0); /* disable the cdrom section of the chip */ WRITE_RWA(7, 4); WRITE_RWA(0x30, 0); /* disable the MPU-401 section of the chip */ WRITE_RWA(7, 2); WRITE_RWA(0x30, 0); } static inline void rwa010_game_port_init(void) { int i; WRITE_RWA(7, 5); dprintk("Slider base: "); WRITE_RWA(0x61, 1); i = inb(0x203); WRITE_RWA(0x60, 2); dprintk("%02X%02X (201)\n", inb(0x203), i); WRITE_RWA(0x30, 1); } static inline void rwa010_waveartist_init(int base, int irq, int dma) { int i; WRITE_RWA(7, 0); dprintk("WaveArtist base: "); WRITE_RWA(0x61, base & 255); i = inb(0x203); WRITE_RWA(0x60, base >> 8); dprintk("%02X%02X (%X),", inb(0x203), i, base); WRITE_RWA(0x70, irq); dprintk(" irq: %d (%d),", inb(0x203), irq); WRITE_RWA(0x74, dma); dprintk(" dma: %d (%d)\n", inb(0x203), dma); WRITE_RWA(0x30, 1); } static inline void rwa010_soundblaster_init(int sb_base, int al_base, int irq, int dma) { int i; WRITE_RWA(7, 1); dprintk("SoundBlaster base: "); WRITE_RWA(0x61, sb_base & 255); i = inb(0x203); WRITE_RWA(0x60, sb_base >> 8); dprintk("%02X%02X (%X),", inb(0x203), i, sb_base); dprintk(" irq: "); WRITE_RWA(0x70, irq); dprintk("%d (%d),", inb(0x203), irq); dprintk(" 8-bit DMA: "); WRITE_RWA(0x74, dma); dprintk("%d (%d)\n", inb(0x203), dma); dprintk("AdLib base: "); WRITE_RWA(0x63, al_base & 255); i = inb(0x203); WRITE_RWA(0x62, al_base >> 8); dprintk("%02X%02X (%X)\n", inb(0x203), i, al_base); WRITE_RWA(0x30, 1); } static void rwa010_soundblaster_reset(void) { int i; outb(1, 0x226); udelay(3); outb(0, 0x226); for (i = 0; i < 5; i++) { if (inb(0x22e) & 0x80) break; mdelay(1); } if (i == 5) printk("SoundBlaster: DSP reset failed\n"); dprintk("SoundBlaster DSP reset: %02X (AA)\n", inb(0x22a)); for (i = 0; i < 5; i++) { if 
((inb(0x22c) & 0x80) == 0) break; mdelay(1); } if (i == 5) printk("SoundBlaster: DSP not ready\n"); else { outb(0xe1, 0x22c); dprintk("SoundBlaster DSP id: "); i = inb(0x22a); udelay(1); i |= inb(0x22a) << 8; dprintk("%04X\n", i); for (i = 0; i < 5; i++) { if ((inb(0x22c) & 0x80) == 0) break; mdelay(1); } if (i == 5) printk("SoundBlaster: could not turn speaker off\n"); outb(0xd3, 0x22c); } /* turn on OPL3 */ outb(5, 0x38a); outb(1, 0x38b); } static void __init rwa010_init(void) { rwa010_unlock(); rwa010_read_ident(); rwa010_global_init(); rwa010_game_port_init(); rwa010_waveartist_init(0x250, 3, 7); rwa010_soundblaster_init(0x220, 0x388, 3, 1); rwa010_soundblaster_reset(); } /* * Initialise any other hardware after we've got the PCI bus * initialised. We may need the PCI bus to talk to this other * hardware. */ static int __init nw_hw_init(void) { if (machine_is_netwinder()) { wb977_init(); cpld_init(); rwa010_init(); } return 0; } __initcall(nw_hw_init); /* * Older NeTTroms either do not provide a parameters * page, or they don't supply correct information in * the parameter page. */ static void __init fixup_netwinder(struct tag *tags, char **cmdline) { #ifdef CONFIG_ISAPNP extern int isapnp_disable; /* * We must not use the kernels ISAPnP code * on the NetWinder - it will reset the settings * for the WaveArtist chip and render it inoperable. 
*/ isapnp_disable = 1; #endif } static void netwinder_restart(enum reboot_mode mode, const char *cmd) { if (mode == REBOOT_SOFT) { /* Jump into the ROM */ soft_restart(0x41000000); } else { local_irq_disable(); local_fiq_disable(); /* open up the SuperIO chip */ outb(0x87, 0x370); outb(0x87, 0x370); /* aux function group 1 (logical device 7) */ outb(0x07, 0x370); outb(0x07, 0x371); /* set GP16 for WD-TIMER output */ outb(0xe6, 0x370); outb(0x00, 0x371); /* set a RED LED and toggle WD_TIMER for rebooting */ outb(0xc4, 0x338); } } /* LEDs */ #if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS) struct netwinder_led { struct led_classdev cdev; u8 mask; }; /* * The triggers lines up below will only be used if the * LED triggers are compiled in. */ static const struct { const char *name; const char *trigger; } netwinder_leds[] = { { "netwinder:green", "heartbeat", }, { "netwinder:red", "cpu0", }, }; /* * The LED control in Netwinder is reversed: * - setting bit means turn off LED * - clearing bit means turn on LED */ static void netwinder_led_set(struct led_classdev *cdev, enum led_brightness b) { struct netwinder_led *led = container_of(cdev, struct netwinder_led, cdev); unsigned long flags; u32 reg; raw_spin_lock_irqsave(&nw_gpio_lock, flags); reg = nw_gpio_read(); if (b != LED_OFF) reg &= ~led->mask; else reg |= led->mask; nw_gpio_modify_op(led->mask, reg); raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); } static enum led_brightness netwinder_led_get(struct led_classdev *cdev) { struct netwinder_led *led = container_of(cdev, struct netwinder_led, cdev); unsigned long flags; u32 reg; raw_spin_lock_irqsave(&nw_gpio_lock, flags); reg = nw_gpio_read(); raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); return (reg & led->mask) ? 
LED_OFF : LED_FULL; } static int __init netwinder_leds_init(void) { int i; if (!machine_is_netwinder()) return -ENODEV; for (i = 0; i < ARRAY_SIZE(netwinder_leds); i++) { struct netwinder_led *led; led = kzalloc(sizeof(*led), GFP_KERNEL); if (!led) break; led->cdev.name = netwinder_leds[i].name; led->cdev.brightness_set = netwinder_led_set; led->cdev.brightness_get = netwinder_led_get; led->cdev.default_trigger = netwinder_leds[i].trigger; if (i == 0) led->mask = GPIO_GREEN_LED; else led->mask = GPIO_RED_LED; if (led_classdev_register(NULL, &led->cdev) < 0) { kfree(led); break; } } return 0; } /* * Since we may have triggers on any subsystem, defer registration * until after subsystem_init. */ fs_initcall(netwinder_leds_init); #endif MACHINE_START(NETWINDER, "Rebel-NetWinder") /* Maintainer: Russell King/Rebel.com */ .atag_offset = 0x100, .video_start = 0x000a0000, .video_end = 0x000bffff, .reserve_lp0 = 1, .reserve_lp2 = 1, .fixup = fixup_netwinder, .map_io = footbridge_map_io, .init_irq = footbridge_init_irq, .init_time = isa_timer_init, .restart = netwinder_restart, MACHINE_END
linux-master
arch/arm/mach-footbridge/netwinder-hw.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-footbridge/common.c
 *
 * Copyright (C) 1998-2000 Russell King, Dave Gilbert.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/dma-direct.h>
#include <video/vga.h>

#include <asm/page.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/system_misc.h>
#include <asm/hardware/dec21285.h>

#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#include "common.h"

#include <mach/hardware.h>
#include <mach/irqs.h>
/* NOTE(review): duplicate of the include above — harmless but redundant */
#include <asm/hardware/dec21285.h>

/*
 * Decode the highest-priority pending interrupt from the DC21285
 * IRQ status register. The test order below defines the fixed
 * priority; returns 0 when nothing is pending.
 */
static int dc21285_get_irq(void)
{
	void __iomem *irqstatus = (void __iomem *)CSR_IRQ_STATUS;
	u32 mask = readl(irqstatus);

	if (mask & IRQ_MASK_SDRAMPARITY)
		return IRQ_SDRAMPARITY;

	if (mask & IRQ_MASK_UART_RX)
		return IRQ_CONRX;

	if (mask & IRQ_MASK_DMA1)
		return IRQ_DMA1;

	if (mask & IRQ_MASK_DMA2)
		return IRQ_DMA2;

	if (mask & IRQ_MASK_IN0)
		return IRQ_IN0;

	if (mask & IRQ_MASK_IN1)
		return IRQ_IN1;

	if (mask & IRQ_MASK_IN2)
		return IRQ_IN2;

	if (mask & IRQ_MASK_IN3)
		return IRQ_IN3;

	if (mask & IRQ_MASK_PCI)
		return IRQ_PCI;

	if (mask & IRQ_MASK_DOORBELLHOST)
		return IRQ_DOORBELLHOST;

	if (mask & IRQ_MASK_I2OINPOST)
		return IRQ_I2OINPOST;

	if (mask & IRQ_MASK_TIMER1)
		return IRQ_TIMER1;

	if (mask & IRQ_MASK_TIMER2)
		return IRQ_TIMER2;

	if (mask & IRQ_MASK_TIMER3)
		return IRQ_TIMER3;

	if (mask & IRQ_MASK_UART_TX)
		return IRQ_CONTX;

	if (mask & IRQ_MASK_PCI_ABORT)
		return IRQ_PCI_ABORT;

	if (mask & IRQ_MASK_PCI_SERR)
		return IRQ_PCI_SERR;

	if (mask & IRQ_MASK_DISCARD_TIMER)
		return IRQ_DISCARD_TIMER;

	if (mask & IRQ_MASK_PCI_DPERR)
		return IRQ_PCI_DPERR;

	if (mask & IRQ_MASK_PCI_PERR)
		return IRQ_PCI_PERR;

	return 0;
}

/* Top-level IRQ handler: drain and dispatch all pending interrupts */
static void dc21285_handle_irq(struct pt_regs *regs)
{
	int irq;

	do {
		irq = dc21285_get_irq();
		if (!irq)
			break;

		generic_handle_irq(irq);
	} while (1);
}

/* Memory clock frequency in Hz; overridable via cmdline or ATAG below */
unsigned int mem_fclk_21285 = 50000000;

EXPORT_SYMBOL(mem_fclk_21285);

/* "mem_fclk_21285=" early command-line parameter */
static int __init early_fclk(char *arg)
{
	mem_fclk_21285 = simple_strtoul(arg, NULL, 0);
	return 0;
}

early_param("mem_fclk_21285", early_fclk);

/* ATAG_MEMCLK handler: boot loader supplied fclk value */
static int __init parse_tag_memclk(const struct tag *tag)
{
	mem_fclk_21285 = tag->u.memclk.fmemclk;
	return 0;
}

__tagtable(ATAG_MEMCLK, parse_tag_memclk);

/*
 * Footbridge IRQ translation table
 * Converts from our IRQ numbers into FootBridge masks
 */
static const int fb_irq_mask[] = {
	IRQ_MASK_UART_RX,	/*  0 */
	IRQ_MASK_UART_TX,	/*  1 */
	IRQ_MASK_TIMER1,	/*  2 */
	IRQ_MASK_TIMER2,	/*  3 */
	IRQ_MASK_TIMER3,	/*  4 */
	IRQ_MASK_IN0,		/*  5 */
	IRQ_MASK_IN1,		/*  6 */
	IRQ_MASK_IN2,		/*  7 */
	IRQ_MASK_IN3,		/*  8 */
	IRQ_MASK_DOORBELLHOST,	/*  9 */
	IRQ_MASK_DMA1,		/* 10 */
	IRQ_MASK_DMA2,		/* 11 */
	IRQ_MASK_PCI,		/* 12 */
	IRQ_MASK_SDRAMPARITY,	/* 13 */
	IRQ_MASK_I2OINPOST,	/* 14 */
	IRQ_MASK_PCI_ABORT,	/* 15 */
	IRQ_MASK_PCI_SERR,	/* 16 */
	IRQ_MASK_DISCARD_TIMER,	/* 17 */
	IRQ_MASK_PCI_DPERR,	/* 18 */
	IRQ_MASK_PCI_PERR,	/* 19 */
};

/* Disable an IRQ by writing its mask bit to the disable register */
static void fb_mask_irq(struct irq_data *d)
{
	*CSR_IRQ_DISABLE = fb_irq_mask[_DC21285_INR(d->irq)];
}

/* Enable an IRQ by writing its mask bit to the enable register */
static void fb_unmask_irq(struct irq_data *d)
{
	*CSR_IRQ_ENABLE = fb_irq_mask[_DC21285_INR(d->irq)];
}

static struct irq_chip fb_chip = {
	.irq_ack	= fb_mask_irq,
	.irq_mask	= fb_mask_irq,
	.irq_unmask	= fb_unmask_irq,
};

static void __init __fb_init_irq(void)
{
	unsigned int irq;

	/*
	 * setup DC21285 IRQs
	 */
	/* start with everything (IRQ and FIQ) masked off */
	*CSR_IRQ_DISABLE = -1;
	*CSR_FIQ_DISABLE = -1;

	for (irq = _DC21285_IRQ(0); irq < _DC21285_IRQ(20); irq++) {
		irq_set_chip_and_handler(irq, &fb_chip, handle_level_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
	}
}

void __init footbridge_init_irq(void)
{
	set_handle_irq(dc21285_handle_irq);

	__fb_init_irq();

	if (machine_is_ebsa285())
		/* The following is dependent on which slot
		 * you plug the Southbridge card into.  We
		 * currently assume that you plug it into
		 * the right-hand most slot.
		 */
		isa_init_irq(IRQ_PCI);

	if (machine_is_netwinder())
		isa_init_irq(IRQ_IN3);
}

/*
 * Common mapping for all systems.  Note that the outbound write flush is
 * commented out since there is a "No Fix" problem with it.  Not mapping
 * it means that we have extra bullet protection on our feet.
 */
static struct map_desc ebsa285_host_io_desc[] __initdata = {
	{
		.virtual	= ARMCSR_BASE,
		.pfn		= __phys_to_pfn(DC21285_ARMCSR_BASE),
		.length		= ARMCSR_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= PCIMEM_BASE,
		.pfn		= __phys_to_pfn(DC21285_PCI_MEM),
		.length		= PCIMEM_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= PCICFG0_BASE,
		.pfn		= __phys_to_pfn(DC21285_PCI_TYPE_0_CONFIG),
		.length		= PCICFG0_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= PCICFG1_BASE,
		.pfn		= __phys_to_pfn(DC21285_PCI_TYPE_1_CONFIG),
		.length		= PCICFG1_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= PCIIACK_BASE,
		.pfn		= __phys_to_pfn(DC21285_PCI_IACK),
		.length		= PCIIACK_SIZE,
		.type		= MT_DEVICE,
	},
};

/* Establish the static device mappings and the early PCI I/O window */
void __init footbridge_map_io(void)
{
	iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
	pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO));
	vga_base = PCIMEM_BASE;
}

void footbridge_restart(enum reboot_mode mode, const char *cmd)
{
	if (mode == REBOOT_SOFT) {
		/* Jump into the ROM */
		soft_restart(0x41000000);
	} else {
		/*
		 * Force the watchdog to do a CPU reset.
		 *
		 * After making sure that the watchdog is disabled
		 * (so we can change the timer registers) we first
		 * enable the timer to autoreload itself.  Next, the
		 * timer interval is set really short and any
		 * current interrupt request is cleared (so we can
		 * see an edge transition).  Finally, TIMER4 is
		 * enabled as the watchdog.
		 */
		*CSR_SA110_CNTL &= ~(1 << 13);
		*CSR_TIMER4_CNTL = TIMER_CNTL_ENABLE |
				   TIMER_CNTL_AUTORELOAD |
				   TIMER_CNTL_DIV16;
		*CSR_TIMER4_LOAD = 0x2;
		*CSR_TIMER4_CLR  = 0;
		*CSR_SA110_CNTL |= (1 << 13);
	}
}
linux-master
arch/arm/mach-footbridge/common.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/arm/mach-footbridge/netwinder-pci.c * * PCI bios-type initialisation for PCI machines * * Bits taken from various places. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/irq.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> /* * We now use the slot ID instead of the device identifiers to select * which interrupt is routed where. */ static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (slot) { case 0: /* host bridge */ return 0; case 9: /* CyberPro */ return IRQ_NETWINDER_VGA; case 10: /* DC21143 */ return IRQ_NETWINDER_ETHER100; case 12: /* Winbond 553 */ return IRQ_ISA_HARDDISK1; case 13: /* Winbond 89C940F */ return IRQ_NETWINDER_ETHER10; default: printk(KERN_ERR "PCI: unknown device in slot %s\n", pci_name(dev)); return 0; } } static struct hw_pci netwinder_pci __initdata = { .map_irq = netwinder_map_irq, .nr_controllers = 1, .ops = &dc21285_ops, .setup = dc21285_setup, .preinit = dc21285_preinit, .postinit = dc21285_postinit, }; static int __init netwinder_pci_init(void) { if (machine_is_netwinder()) pci_common_init(&netwinder_pci); return 0; } subsys_initcall(netwinder_pci_init);
linux-master
arch/arm/mach-footbridge/netwinder-pci.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/dec21285.c: PCI functions for DC21285 * * Copyright (C) 1998-2001 Russell King * Copyright (C) 1998-2000 Phil Blundell */ #include <linux/dma-map-ops.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/spinlock.h> #include <asm/irq.h> #include <asm/mach/pci.h> #include <asm/hardware/dec21285.h> #define MAX_SLOTS 21 #define PCICMD_ABORT ((PCI_STATUS_REC_MASTER_ABORT| \ PCI_STATUS_REC_TARGET_ABORT)<<16) #define PCICMD_ERROR_BITS ((PCI_STATUS_DETECTED_PARITY | \ PCI_STATUS_REC_MASTER_ABORT | \ PCI_STATUS_REC_TARGET_ABORT | \ PCI_STATUS_PARITY) << 16) extern int setup_arm_irq(int, struct irqaction *); static unsigned long dc21285_base_address(struct pci_bus *bus, unsigned int devfn) { unsigned long addr = 0; if (bus->number == 0) { if (PCI_SLOT(devfn) == 0) /* * For devfn 0, point at the 21285 */ addr = ARMCSR_BASE; else { devfn -= 1 << 3; if (devfn < PCI_DEVFN(MAX_SLOTS, 0)) addr = PCICFG0_BASE | 0xc00000 | (devfn << 8); } } else addr = PCICFG1_BASE | (bus->number << 16) | (devfn << 8); return addr; } static int dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr = dc21285_base_address(bus, devfn); u32 v = 0xffffffff; if (addr) switch (size) { case 1: asm volatile("ldrb %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 2: asm volatile("ldrh %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 4: asm volatile("ldr %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; } *value = v; v = *CSR_PCICMD; if (v & PCICMD_ABORT) { *CSR_PCICMD = v & (0xffff|PCICMD_ABORT); return -1; } return PCIBIOS_SUCCESSFUL; } static int dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) 
{ unsigned long addr = dc21285_base_address(bus, devfn); u32 v; if (addr) switch (size) { case 1: asm volatile("strb %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 2: asm volatile("strh %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 4: asm volatile("str %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; } v = *CSR_PCICMD; if (v & PCICMD_ABORT) { *CSR_PCICMD = v & (0xffff|PCICMD_ABORT); return -1; } return PCIBIOS_SUCCESSFUL; } struct pci_ops dc21285_ops = { .read = dc21285_read_config, .write = dc21285_write_config, }; static struct timer_list serr_timer; static struct timer_list perr_timer; static void dc21285_enable_error(struct timer_list *timer) { del_timer(timer); if (timer == &serr_timer) enable_irq(IRQ_PCI_SERR); else if (timer == &perr_timer) enable_irq(IRQ_PCI_PERR); } /* * Warn on PCI errors. */ static irqreturn_t dc21285_abort_irq(int irq, void *dev_id) { unsigned int cmd; unsigned int status; cmd = *CSR_PCICMD; status = cmd >> 16; cmd = cmd & 0xffff; if (status & PCI_STATUS_REC_MASTER_ABORT) { printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", instruction_pointer(get_irq_regs())); cmd |= PCI_STATUS_REC_MASTER_ABORT << 16; } if (status & PCI_STATUS_REC_TARGET_ABORT) { printk(KERN_DEBUG "PCI: target abort: "); pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT, 1); printk("\n"); cmd |= PCI_STATUS_REC_TARGET_ABORT << 16; } *CSR_PCICMD = cmd; return IRQ_HANDLED; } static irqreturn_t dc21285_serr_irq(int irq, void *dev_id) { struct timer_list *timer = dev_id; unsigned int cntl; printk(KERN_DEBUG "PCI: system error received: "); pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1); printk("\n"); cntl = *CSR_SA110_CNTL & 0xffffdf07; *CSR_SA110_CNTL = cntl | SA110_CNTL_RXSERR; /* * back off this interrupt */ disable_irq(irq); timer->expires = jiffies + HZ; add_timer(timer); return IRQ_HANDLED; } static irqreturn_t 
dc21285_discard_irq(int irq, void *dev_id) { printk(KERN_DEBUG "PCI: discard timer expired\n"); *CSR_SA110_CNTL &= 0xffffde07; return IRQ_HANDLED; } static irqreturn_t dc21285_dparity_irq(int irq, void *dev_id) { unsigned int cmd; printk(KERN_DEBUG "PCI: data parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd = *CSR_PCICMD & 0xffff; *CSR_PCICMD = cmd | 1 << 24; return IRQ_HANDLED; } static irqreturn_t dc21285_parity_irq(int irq, void *dev_id) { struct timer_list *timer = dev_id; unsigned int cmd; printk(KERN_DEBUG "PCI: parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd = *CSR_PCICMD & 0xffff; *CSR_PCICMD = cmd | 1 << 31; /* * back off this interrupt */ disable_irq(irq); timer->expires = jiffies + HZ; add_timer(timer); return IRQ_HANDLED; } static int dc21285_pci_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) { if (action != BUS_NOTIFY_ADD_DEVICE) return NOTIFY_DONE; dma_direct_set_offset(data, PHYS_OFFSET, BUS_OFFSET, SZ_256M); return NOTIFY_OK; } static struct notifier_block dc21285_pci_bus_nb = { .notifier_call = dc21285_pci_bus_notifier, }; int __init dc21285_setup(int nr, struct pci_sys_data *sys) { struct resource *res; res = kcalloc(2, sizeof(struct resource), GFP_KERNEL); if (!res) { printk("out of memory for root bus resources"); return 0; } res[0].flags = IORESOURCE_MEM; res[0].name = "Footbridge non-prefetch"; res[1].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; res[1].name = "Footbridge prefetch"; allocate_resource(&iomem_resource, &res[1], 0x20000000, 0xa0000000, 0xffffffff, 0x20000000, NULL, NULL); allocate_resource(&iomem_resource, &res[0], 0x40000000, 0x80000000, 0xffffffff, 0x40000000, NULL, NULL); sys->mem_offset = DC21285_PCI_MEM; pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset); pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset); 
bus_register_notifier(&pci_bus_type, &dc21285_pci_bus_nb); return 1; } #define dc21285_request_irq(_a, _b, _c, _d, _e) \ WARN_ON(request_irq(_a, _b, _c, _d, _e) < 0) void __init dc21285_preinit(void) { unsigned int mem_size, mem_mask; pcibios_min_mem = 0x81000000; mem_size = (unsigned int)high_memory - PAGE_OFFSET; for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1) if (mem_mask >= mem_size) break; /* * These registers need to be set up whether we're the * central function or not. */ *CSR_SDRAMBASEMASK = (mem_mask - 1) & 0x0ffc0000; *CSR_SDRAMBASEOFFSET = 0; *CSR_ROMBASEMASK = 0x80000000; *CSR_CSRBASEMASK = 0; *CSR_CSRBASEOFFSET = 0; *CSR_PCIADDR_EXTN = 0; printk(KERN_INFO "PCI: DC21285 footbridge, revision %02lX, in " "central function mode\n", *CSR_CLASSREV & 0xff); /* * Clear any existing errors - we aren't * interested in historical data... */ *CSR_SA110_CNTL = (*CSR_SA110_CNTL & 0xffffde07) | SA110_CNTL_RXSERR; *CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS; timer_setup(&serr_timer, dc21285_enable_error, 0); timer_setup(&perr_timer, dc21285_enable_error, 0); /* * We don't care if these fail. */ dc21285_request_irq(IRQ_PCI_SERR, dc21285_serr_irq, 0, "PCI system error", &serr_timer); dc21285_request_irq(IRQ_PCI_PERR, dc21285_parity_irq, 0, "PCI parity error", &perr_timer); dc21285_request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, 0, "PCI abort", NULL); dc21285_request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, 0, "Discard timer", NULL); dc21285_request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, 0, "PCI data parity", NULL); /* * Map our SDRAM at a known address in PCI space, just in case * the firmware had other ideas. Using a nonzero base is * necessary, since some VGA cards forcefully use PCI addresses * in the range 0x000a0000 to 0x000c0000. (eg, S3 cards). 
*/ *CSR_PCICSRBASE = 0xf4000000; *CSR_PCICSRIOBASE = 0; *CSR_PCISDRAMBASE = BUS_OFFSET; *CSR_PCIROMBASE = 0; *CSR_PCICMD = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE | PCICMD_ERROR_BITS; } void __init dc21285_postinit(void) { register_isa_ports(DC21285_PCI_MEM, DC21285_PCI_IO, 0); }
linux-master
arch/arm/mach-footbridge/dc21285.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/arm/mach-footbridge/isa-timer.c * * Copyright (C) 1998 Russell King. * Copyright (C) 1998 Phil Blundell */ #include <linux/clockchips.h> #include <linux/i8253.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/timex.h> #include <asm/irq.h> #include <asm/mach/time.h> #include "common.h" static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *ce = dev_id; ce->event_handler(ce); return IRQ_HANDLED; } void __init isa_timer_init(void) { clocksource_i8253_init(); if (request_irq(i8253_clockevent.irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "pit", &i8253_clockevent)) pr_err("Failed to request irq %d(pit)\n", i8253_clockevent.irq); clockevent_i8253_init(false); }
linux-master
arch/arm/mach-footbridge/isa-timer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/arm/mach-footbridge/dc21285-timer.c
 *
 * Copyright (C) 1998 Russell King.
 * Copyright (C) 1998 Phil Blundell
 *
 * Uses the DC21285 (Footbridge) on-chip timers:
 *   TIMER1 - clockevent (tick / one-shot)
 *   TIMER2 - clocksource
 *   TIMER3 - sched_clock
 * All three run from the memory clock divided by 16 (TIMER_CNTL_DIV16).
 */
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/sched_clock.h>

#include <asm/irq.h>
#include <asm/hardware/dec21285.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>

#include "common.h"

/*
 * TIMER2 counts down; invert against the mask so the clocksource is
 * monotonically increasing.
 */
static u64 cksrc_dc21285_read(struct clocksource *cs)
{
	return cs->mask - *CSR_TIMER2_VALUE;
}

/* Start TIMER2 free-running from its maximum value. */
static int cksrc_dc21285_enable(struct clocksource *cs)
{
	*CSR_TIMER2_LOAD = cs->mask;
	*CSR_TIMER2_CLR = 0;
	*CSR_TIMER2_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
	return 0;
}

static void cksrc_dc21285_disable(struct clocksource *cs)
{
	*CSR_TIMER2_CNTL = 0;
}

static struct clocksource cksrc_dc21285 = {
	.name		= "dc21285_timer2",
	.rating		= 200,
	.read		= cksrc_dc21285_read,
	.enable		= cksrc_dc21285_enable,
	.disable	= cksrc_dc21285_disable,
	.mask		= CLOCKSOURCE_MASK(24),	/* 24-bit down counter */
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Program TIMER1 for a single countdown of 'delta' ticks. */
static int ckevt_dc21285_set_next_event(unsigned long delta,
	struct clock_event_device *c)
{
	*CSR_TIMER1_CLR = 0;
	*CSR_TIMER1_LOAD = delta;
	*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;

	return 0;
}

static int ckevt_dc21285_shutdown(struct clock_event_device *c)
{
	*CSR_TIMER1_CNTL = 0;

	return 0;
}

/*
 * Periodic mode: auto-reload TIMER1 at HZ.  The "+ 8 * HZ" rounds the
 * divide to nearest rather than truncating.
 */
static int ckevt_dc21285_set_periodic(struct clock_event_device *c)
{
	*CSR_TIMER1_CLR = 0;
	*CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
	*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
			   TIMER_CNTL_DIV16;

	return 0;
}

static struct clock_event_device ckevt_dc21285 = {
	.name		= "dc21285_timer1",
	.features	= CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.irq		= IRQ_TIMER1,
	.set_next_event	= ckevt_dc21285_set_next_event,
	.set_state_shutdown = ckevt_dc21285_shutdown,
	.set_state_periodic = ckevt_dc21285_set_periodic,
	.set_state_oneshot = ckevt_dc21285_shutdown,
	.tick_resume	= ckevt_dc21285_set_periodic,
};

static irqreturn_t timer1_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ce = dev_id;

	*CSR_TIMER1_CLR = 0;	/* ack the timer interrupt */

	/* Stop the timer if in one-shot mode */
	if (clockevent_state_oneshot(ce))
		*CSR_TIMER1_CNTL = 0;

	ce->event_handler(ce);

	return IRQ_HANDLED;
}

/*
 * Set up timer interrupt.
 */
void __init footbridge_timer_init(void)
{
	struct clock_event_device *ce = &ckevt_dc21285;
	unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);

	clocksource_register_hz(&cksrc_dc21285, rate);

	if (request_irq(ce->irq, timer1_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
			"dc21285_timer1", &ckevt_dc21285))
		pr_err("Failed to request irq %d (dc21285_timer1)", ce->irq);

	ce->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
}

/* TIMER3 also counts down, hence the inversion. */
static u64 notrace footbridge_read_sched_clock(void)
{
	return ~*CSR_TIMER3_VALUE;
}

void __init footbridge_sched_clock(void)
{
	unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);

	*CSR_TIMER3_LOAD = 0;
	*CSR_TIMER3_CLR = 0;
	*CSR_TIMER3_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;

	sched_clock_register(footbridge_read_sched_clock, 24, rate);
}
linux-master
arch/arm/mach-footbridge/dc21285-timer.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/arm/mach-footbridge/ebsa285-pci.c * * PCI bios-type initialisation for PCI machines * * Bits taken from various places. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/irq.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->vendor == PCI_VENDOR_ID_CONTAQ && dev->device == PCI_DEVICE_ID_CONTAQ_82C693) switch (PCI_FUNC(dev->devfn)) { case 1: return 14; case 2: return 15; case 3: return 12; } return irqmap_ebsa285[(slot + pin) & 3]; } static struct hw_pci ebsa285_pci __initdata = { .map_irq = ebsa285_map_irq, .nr_controllers = 1, .ops = &dc21285_ops, .setup = dc21285_setup, .preinit = dc21285_preinit, .postinit = dc21285_postinit, }; static int __init ebsa285_init_pci(void) { if (machine_is_ebsa285()) pci_common_init(&ebsa285_pci); return 0; } subsys_initcall(ebsa285_init_pci);
linux-master
arch/arm/mach-footbridge/ebsa285-pci.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-footbridge/isa.c * * Copyright (C) 2004 Russell King. */ #include <linux/init.h> #include <linux/serial_8250.h> #include <asm/irq.h> #include <asm/hardware/dec21285.h> #include "common.h" static struct resource rtc_resources[] = { [0] = { .start = 0x70, .end = 0x73, .flags = IORESOURCE_IO, }, [1] = { .start = IRQ_ISA_RTC_ALARM, .end = IRQ_ISA_RTC_ALARM, .flags = IORESOURCE_IRQ, } }; static struct platform_device rtc_device = { .name = "rtc_cmos", .id = -1, .resource = rtc_resources, .num_resources = ARRAY_SIZE(rtc_resources), }; static struct resource serial_resources[] = { [0] = { .start = 0x3f8, .end = 0x3ff, .flags = IORESOURCE_IO, }, [1] = { .start = 0x2f8, .end = 0x2ff, .flags = IORESOURCE_IO, }, }; static struct plat_serial8250_port serial_platform_data[] = { { .iobase = 0x3f8, .irq = IRQ_ISA_UART, .uartclk = 1843200, .regshift = 0, .iotype = UPIO_PORT, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, }, { .iobase = 0x2f8, .irq = IRQ_ISA_UART2, .uartclk = 1843200, .regshift = 0, .iotype = UPIO_PORT, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, }, { }, }; static struct platform_device serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = serial_platform_data, }, .resource = serial_resources, .num_resources = ARRAY_SIZE(serial_resources), }; static int __init footbridge_isa_init(void) { int err = 0; /* Personal server doesn't have RTC */ isa_rtc_init(); err = platform_device_register(&rtc_device); if (err) printk(KERN_ERR "Unable to register RTC device: %d\n", err); err = platform_device_register(&serial_device); if (err) printk(KERN_ERR "Unable to register serial device: %d\n", err); return 0; } arch_initcall(footbridge_isa_init);
linux-master
arch/arm/mach-footbridge/isa.c
// SPDX-License-Identifier: GPL-2.0 /* * arch/arm/mach-footbridge/isa-rtc.c * * Copyright (C) 1998 Russell King. * Copyright (C) 1998 Phil Blundell * * CATS has a real-time clock, though the evaluation board doesn't. * * Changelog: * 21-Mar-1998 RMK Created * 27-Aug-1998 PJB CATS support * 28-Dec-1998 APH Made leds optional * 20-Jan-1999 RMK Started merge of EBSA285, CATS and NetWinder * 16-Mar-1999 RMK More support for EBSA285-like machines with RTCs in */ #define RTC_PORT(x) (0x70+(x)) #define RTC_ALWAYS_BCD 0 #include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/io.h> #include "common.h" void __init isa_rtc_init(void) { int reg_d, reg_b; /* * Probe for the RTC. */ reg_d = CMOS_READ(RTC_REG_D); /* * make sure the divider is set */ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_REG_A); /* * Set control reg B * (24 hour mode, update enabled) */ reg_b = CMOS_READ(RTC_REG_B) & 0x7f; reg_b |= 2; CMOS_WRITE(reg_b, RTC_REG_B); if ((CMOS_READ(RTC_REG_A) & 0x7f) == RTC_REF_CLCK_32KHZ && CMOS_READ(RTC_REG_B) == reg_b) { /* * We have a RTC. Check the battery */ if ((reg_d & 0x80) == 0) printk(KERN_WARNING "RTC: *** warning: CMOS battery bad\n"); } }
linux-master
arch/arm/mach-footbridge/isa-rtc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/arm/mach-footbridge/ebsa285.c
 *
 * EBSA285 machine fixup
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/leds.h>

#include <asm/hardware/dec21285.h>
#include <asm/mach-types.h>

#include <asm/mach/arch.h>

#include "common.h"

/* LEDs */
#if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS)

/* XBUS LED bits; the _L suffix marks active-low lines (bit set = off). */
#define XBUS_AMBER_L	BIT(0)
#define XBUS_GREEN_L	BIT(1)
#define XBUS_RED_L	BIT(2)
#define XBUS_TOGGLE	BIT(7)

struct ebsa285_led {
	struct led_classdev     cdev;
	u8                      mask;	/* XBUS bit for this LED */
};

/*
 * The triggers lines up below will only be used if the
 * LED triggers are compiled in.
 */
static const struct {
	const char *name;
	const char *trigger;
} ebsa285_leds[] = {
	{ "ebsa285:amber", "cpu0", },
	{ "ebsa285:green", "heartbeat", },
	{ "ebsa285:red",},
};

/* Shadow copy of the (write-only) XBUS LED register. */
static unsigned char hw_led_state;
static void __iomem *xbus;

/* Active-low: clear the LED's bit to light it, set the bit to turn
 * it off.  The shadow state is updated and written back whole. */
static void ebsa285_led_set(struct led_classdev *cdev,
		enum led_brightness b)
{
	struct ebsa285_led *led = container_of(cdev,
			struct ebsa285_led, cdev);

	if (b == LED_OFF)
		hw_led_state |= led->mask;
	else
		hw_led_state &= ~led->mask;
	writeb(hw_led_state, xbus);
}

/* Read from the shadow state (the register itself is not read back). */
static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
{
	struct ebsa285_led *led = container_of(cdev,
			struct ebsa285_led, cdev);

	return hw_led_state & led->mask ? LED_OFF : LED_FULL;
}

/*
 * Map the XBUS LED register, switch all three LEDs off, then register
 * one classdev per LED.  Partial registration is tolerated: on any
 * allocation/registration failure the loop simply stops.
 */
static int __init ebsa285_leds_init(void)
{
	int i;

	if (!machine_is_ebsa285())
		return -ENODEV;

	xbus = ioremap(XBUS_CS2, SZ_4K);
	if (!xbus)
		return -ENOMEM;

	/* 3 LEDS all off */
	hw_led_state = XBUS_AMBER_L | XBUS_GREEN_L | XBUS_RED_L;
	writeb(hw_led_state, xbus);

	for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
		struct ebsa285_led *led;

		led = kzalloc(sizeof(*led), GFP_KERNEL);
		if (!led)
			break;

		led->cdev.name = ebsa285_leds[i].name;
		led->cdev.brightness_set = ebsa285_led_set;
		led->cdev.brightness_get = ebsa285_led_get;
		led->cdev.default_trigger = ebsa285_leds[i].trigger;
		led->mask = BIT(i);

		if (led_classdev_register(NULL, &led->cdev) < 0) {
			kfree(led);
			break;
		}
	}

	return 0;
}

/*
 * Since we may have triggers on any subsystem, defer registration
 * until after subsystem_init.
 */
fs_initcall(ebsa285_leds_init);
#endif

MACHINE_START(EBSA285, "EBSA285")
	/* Maintainer: Russell King */
	.atag_offset	= 0x100,
	.video_start	= 0x000a0000,
	.video_end	= 0x000bffff,
	.map_io		= footbridge_map_io,
	.init_early	= footbridge_sched_clock,
	.init_irq	= footbridge_init_irq,
	.init_time	= footbridge_timer_init,
	.restart	= footbridge_restart,
MACHINE_END
linux-master
arch/arm/mach-footbridge/ebsa285.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1999-2000 Russell King * * ISA DMA primitives * Taken from various sources, including: * linux/include/asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen and John Boyd, * Nov. 1992. * arch/arm/kernel/dma-ebsa285.c * Copyright (C) 1998 Phil Blundell */ #include <linux/dma-map-ops.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <asm/dma.h> #include <asm/mach/dma.h> #include <asm/hardware/dec21285.h> #define ISA_DMA_MASK 0 #define ISA_DMA_MODE 1 #define ISA_DMA_CLRFF 2 #define ISA_DMA_PGHI 3 #define ISA_DMA_PGLO 4 #define ISA_DMA_ADDR 5 #define ISA_DMA_COUNT 6 static unsigned int isa_dma_port[8][7] = { /* MASK MODE CLRFF PAGE_HI PAGE_LO ADDR COUNT */ { 0x0a, 0x0b, 0x0c, 0x487, 0x087, 0x00, 0x01 }, { 0x0a, 0x0b, 0x0c, 0x483, 0x083, 0x02, 0x03 }, { 0x0a, 0x0b, 0x0c, 0x481, 0x081, 0x04, 0x05 }, { 0x0a, 0x0b, 0x0c, 0x482, 0x082, 0x06, 0x07 }, { 0xd4, 0xd6, 0xd8, 0x000, 0x000, 0xc0, 0xc2 }, { 0xd4, 0xd6, 0xd8, 0x48b, 0x08b, 0xc4, 0xc6 }, { 0xd4, 0xd6, 0xd8, 0x489, 0x089, 0xc8, 0xca }, { 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce } }; static int isa_get_dma_residue(unsigned int chan, dma_t *dma) { unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT]; int count; count = 1 + inb(io_port); count |= inb(io_port) << 8; return chan < 4 ? 
count : (count << 1); } static struct device isa_dma_dev = { .init_name = "fallback device", .coherent_dma_mask = ~(dma_addr_t)0, .dma_mask = &isa_dma_dev.coherent_dma_mask, }; static void isa_enable_dma(unsigned int chan, dma_t *dma) { if (dma->invalid) { unsigned long address, length; unsigned int mode; enum dma_data_direction direction; mode = (chan & 3) | dma->dma_mode; switch (dma->dma_mode & DMA_MODE_MASK) { case DMA_MODE_READ: direction = DMA_FROM_DEVICE; break; case DMA_MODE_WRITE: direction = DMA_TO_DEVICE; break; case DMA_MODE_CASCADE: direction = DMA_BIDIRECTIONAL; break; default: direction = DMA_NONE; break; } if (!dma->sg) { /* * Cope with ISA-style drivers which expect cache * coherence. */ dma->sg = &dma->buf; dma->sgcount = 1; dma->buf.length = dma->count; dma->buf.dma_address = dma_map_single(&isa_dma_dev, dma->addr, dma->count, direction); } address = dma->buf.dma_address; length = dma->buf.length - 1; outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]); outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]); if (chan >= 4) { address >>= 1; length >>= 1; } outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]); outb(address, isa_dma_port[chan][ISA_DMA_ADDR]); outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]); outb(length, isa_dma_port[chan][ISA_DMA_COUNT]); outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]); outb(mode, isa_dma_port[chan][ISA_DMA_MODE]); dma->invalid = 0; } outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]); } static void isa_disable_dma(unsigned int chan, dma_t *dma) { outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]); } static struct dma_ops isa_dma_ops = { .type = "ISA", .enable = isa_enable_dma, .disable = isa_disable_dma, .residue = isa_get_dma_residue, }; static struct resource dma_resources[] = { { .name = "dma1", .start = 0x0000, .end = 0x000f }, { .name = "dma low page", .start = 0x0080, .end = 0x008f }, { .name = "dma2", .start = 0x00c0, .end = 0x00df }, { .name = "dma high page", .start = 0x0480, .end = 0x048f } }; static dma_t 
isa_dma[8]; /* * ISA DMA always starts at channel 0 */ static int __init isa_dma_init(void) { /* * Try to autodetect presence of an ISA DMA controller. * We do some minimal initialisation, and check that * channel 0's DMA address registers are writeable. */ outb(0xff, 0x0d); outb(0xff, 0xda); /* * Write high and low address, and then read them back * in the same order. */ outb(0x55, 0x00); outb(0xaa, 0x00); if (inb(0) == 0x55 && inb(0) == 0xaa) { unsigned int chan, i; for (chan = 0; chan < 8; chan++) { isa_dma[chan].d_ops = &isa_dma_ops; isa_disable_dma(chan, NULL); } outb(0x40, 0x0b); outb(0x41, 0x0b); outb(0x42, 0x0b); outb(0x43, 0x0b); outb(0xc0, 0xd6); outb(0x41, 0xd6); outb(0x42, 0xd6); outb(0x43, 0xd6); outb(0, 0xd4); outb(0x10, 0x08); outb(0x10, 0xd0); /* * Is this correct? According to my documentation, it * doesn't appear to be. It should be: * outb(0x3f, 0x40b); outb(0x3f, 0x4d6); */ outb(0x30, 0x40b); outb(0x31, 0x40b); outb(0x32, 0x40b); outb(0x33, 0x40b); outb(0x31, 0x4d6); outb(0x32, 0x4d6); outb(0x33, 0x4d6); for (i = 0; i < ARRAY_SIZE(dma_resources); i++) request_resource(&ioport_resource, dma_resources + i); for (chan = 0; chan < 8; chan++) { int ret = isa_dma_add(chan, &isa_dma[chan]); if (ret) pr_err("ISADMA%u: unable to register: %d\n", chan, ret); } request_dma(DMA_ISA_CASCADE, "cascade"); } dma_direct_set_offset(&isa_dma_dev, PHYS_OFFSET, BUS_OFFSET, SZ_256M); return 0; } core_initcall(isa_dma_init);
linux-master
arch/arm/mach-footbridge/dma-isa.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-footbridge/irq.c * * Copyright (C) 1996-2000 Russell King * * Changelog: * 22-Aug-1998 RMK Restructured IRQ routines * 03-Sep-1998 PJB Merged CATS support * 20-Jan-1998 RMK Started merge of EBSA286, CATS and NetWinder * 26-Jan-1999 PJB Don't use IACK on CATS * 16-Mar-1999 RMK Added autodetect of ISA PICs */ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/init.h> #include <linux/io.h> #include <linux/spinlock.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <asm/hardware/dec21285.h> #include <asm/irq.h> #include <asm/mach-types.h> #include "common.h" static void isa_mask_pic_lo_irq(struct irq_data *d) { unsigned int mask = 1 << (d->irq & 7); outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO); } static void isa_ack_pic_lo_irq(struct irq_data *d) { unsigned int mask = 1 << (d->irq & 7); outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO); outb(0x20, PIC_LO); } static void isa_unmask_pic_lo_irq(struct irq_data *d) { unsigned int mask = 1 << (d->irq & 7); outb(inb(PIC_MASK_LO) & ~mask, PIC_MASK_LO); } static struct irq_chip isa_lo_chip = { .irq_ack = isa_ack_pic_lo_irq, .irq_mask = isa_mask_pic_lo_irq, .irq_unmask = isa_unmask_pic_lo_irq, }; static void isa_mask_pic_hi_irq(struct irq_data *d) { unsigned int mask = 1 << (d->irq & 7); outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI); } static void isa_ack_pic_hi_irq(struct irq_data *d) { unsigned int mask = 1 << (d->irq & 7); outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI); outb(0x62, PIC_LO); outb(0x20, PIC_HI); } static void isa_unmask_pic_hi_irq(struct irq_data *d) { unsigned int mask = 1 << (d->irq & 7); outb(inb(PIC_MASK_HI) & ~mask, PIC_MASK_HI); } static struct irq_chip isa_hi_chip = { .irq_ack = isa_ack_pic_hi_irq, .irq_mask = isa_mask_pic_hi_irq, .irq_unmask = isa_unmask_pic_hi_irq, }; static void isa_irq_handler(struct irq_desc *desc) { unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE; if (isa_irq < 
_ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) { do_bad_IRQ(desc); return; } generic_handle_irq(isa_irq); } static struct resource pic1_resource = { .name = "pic1", .start = 0x20, .end = 0x3f, }; static struct resource pic2_resource = { .name = "pic2", .start = 0xa0, .end = 0xbf, }; void __init isa_init_irq(unsigned int host_irq) { unsigned int irq; /* * Setup, and then probe for an ISA PIC * If the PIC is not there, then we * ignore the PIC. */ outb(0x11, PIC_LO); outb(_ISA_IRQ(0), PIC_MASK_LO); /* IRQ number */ outb(0x04, PIC_MASK_LO); /* Slave on Ch2 */ outb(0x01, PIC_MASK_LO); /* x86 */ outb(0xf5, PIC_MASK_LO); /* pattern: 11110101 */ outb(0x11, PIC_HI); outb(_ISA_IRQ(8), PIC_MASK_HI); /* IRQ number */ outb(0x02, PIC_MASK_HI); /* Slave on Ch1 */ outb(0x01, PIC_MASK_HI); /* x86 */ outb(0xfa, PIC_MASK_HI); /* pattern: 11111010 */ outb(0x0b, PIC_LO); outb(0x0b, PIC_HI); if (inb(PIC_MASK_LO) == 0xf5 && inb(PIC_MASK_HI) == 0xfa) { outb(0xff, PIC_MASK_LO);/* mask all IRQs */ outb(0xff, PIC_MASK_HI);/* mask all IRQs */ } else { printk(KERN_INFO "IRQ: ISA PIC not found\n"); host_irq = (unsigned int)-1; } if (host_irq != (unsigned int)-1) { for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) { irq_set_chip_and_handler(irq, &isa_lo_chip, handle_level_irq); irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) { irq_set_chip_and_handler(irq, &isa_hi_chip, handle_level_irq); irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } request_resource(&ioport_resource, &pic1_resource); request_resource(&ioport_resource, &pic2_resource); irq = IRQ_ISA_CASCADE; if (request_irq(irq, no_action, 0, "cascade", NULL)) pr_err("Failed to request irq %u (cascade)\n", irq); irq_set_chained_handler(host_irq, isa_irq_handler); /* * On the NetWinder, don't automatically * enable ISA IRQ11 when it is requested. * There appears to be a missing pull-up * resistor on this line. 
*/ if (machine_is_netwinder()) irq_modify_status(_ISA_IRQ(11), IRQ_NOREQUEST | IRQ_NOPROBE, IRQ_NOAUTOEN); } }
linux-master
arch/arm/mach-footbridge/isa-irq.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/arm/mach-moxart/moxart.c * * (C) Copyright 2013, Jonas Jensen <[email protected]> */
linux-master
arch/arm/mach-moxart/moxart.c
// SPDX-License-Identifier: GPL-2.0-only /* * ARTPEC-6 device support. */ #include <linux/amba/bus.h> #include <linux/clocksource.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/clk-provider.h> #include <linux/regmap.h> #include <linux/smp.h> #include <asm/smp_scu.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/psci.h> #include <linux/arm-smccc.h> #define ARTPEC6_DMACFG_REGNUM 0x10 #define ARTPEC6_DMACFG_UARTS_BURST 0xff #define SECURE_OP_L2C_WRITEREG 0xb4000001 static void __init artpec6_init_machine(void) { struct regmap *regmap; regmap = syscon_regmap_lookup_by_compatible("axis,artpec6-syscon"); if (!IS_ERR(regmap)) { /* Use PL011 DMA Burst Request signal instead of DMA * Single Request */ regmap_write(regmap, ARTPEC6_DMACFG_REGNUM, ARTPEC6_DMACFG_UARTS_BURST); }; } static void artpec6_l2c310_write_sec(unsigned long val, unsigned reg) { struct arm_smccc_res res; arm_smccc_smc(SECURE_OP_L2C_WRITEREG, reg, val, 0, 0, 0, 0, 0, &res); WARN_ON(res.a0); } static const char * const artpec6_dt_match[] = { "axis,artpec6", NULL }; DT_MACHINE_START(ARTPEC6, "Axis ARTPEC-6 Platform") .l2c_aux_val = 0x0C000000, .l2c_aux_mask = 0xF3FFFFFF, .l2c_write_sec = artpec6_l2c310_write_sec, .init_machine = artpec6_init_machine, .dt_compat = artpec6_dt_match, MACHINE_END
linux-master
arch/arm/mach-artpec/board-artpec6.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright: (C) 2018 Socionext Inc.
 * Copyright: (C) 2015 Linaro Ltd.
 */

#include <linux/cpu_pm.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/of_address.h>
#include <linux/suspend.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/idmap.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#define M10V_MAX_CPU	4
/* Magic parked in a CPU's SRAM mailbox slot to mark it "not booting" */
#define KERNEL_UNBOOT_FLAG	0x12345678

/* ioremapped base of the per-CPU boot mailbox SRAM (one 32-bit slot per CPU) */
static void __iomem *m10v_smp_base;

/*
 * Boot a secondary CPU: write the physical address of secondary_startup
 * into the CPU's mailbox slot, then wake it with a wakeup IPI.
 * Returns -ENXIO if the mailbox SRAM was never mapped, -EINVAL for an
 * out-of-range core number.
 */
static int m10v_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
	unsigned int mpidr, cpu, cluster;

	if (!m10v_smp_base)
		return -ENXIO;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (cpu >= M10V_MAX_CPU)
		return -EINVAL;

	pr_info("%s: cpu %u l_cpu %u cluster %u\n",
			__func__, cpu, l_cpu, cluster);

	writel(__pa_symbol(secondary_startup), m10v_smp_base + cpu * 4);
	arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));

	return 0;
}

/*
 * SMP prepare hook: map the mailbox SRAM described by the
 * "socionext,milbeaut-smp-sram" node and park all slots with the
 * unboot flag.  Failures leave m10v_smp_base NULL, which later makes
 * m10v_boot_secondary() fail with -ENXIO.
 */
static void m10v_smp_init(unsigned int max_cpus)
{
	unsigned int mpidr, cpu, cluster;
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "socionext,milbeaut-smp-sram");
	if (!np)
		return;

	m10v_smp_base = of_iomap(np, 0);
	if (!m10v_smp_base)
		return;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster);

	for (cpu = 0; cpu < M10V_MAX_CPU; cpu++)
		writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the dying CPU off the GIC, flush its caches and park it in wfi */
static void m10v_cpu_die(unsigned int l_cpu)
{
	gic_cpu_if_down(0);
	v7_exit_coherency_flush(louis);
	wfi();
}

/* Re-park the dead CPU's mailbox slot; returns 1 (= kill succeeded) */
static int m10v_cpu_kill(unsigned int l_cpu)
{
	unsigned int mpidr, cpu;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);

	return 1;
}
#endif

static struct smp_operations m10v_smp_ops __initdata = {
	.smp_prepare_cpus	= m10v_smp_init,
	.smp_boot_secondary	= m10v_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= m10v_cpu_die,
	.cpu_kill		= m10v_cpu_kill,
#endif
};
CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);

/* Only standby and suspend-to-mem are supported */
static int m10v_pm_valid(suspend_state_t state)
{
	return (state == PM_SUSPEND_STANDBY) || (state == PM_SUSPEND_MEM);
}

typedef void (*phys_reset_t)(unsigned long);
static phys_reset_t phys_reset;

/*
 * cpu_suspend() finisher for suspend-to-mem: wait for wakeup in wfi,
 * then re-enter the kernel through cpu_resume exactly like a freshly
 * booted secondary (via an identity-mapped cpu_reset call).
 */
static int m10v_die(unsigned long arg)
{
	setup_mm_for_reboot();
	asm("wfi");
	/* Boot just like a secondary */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(cpu_resume));

	return 0;
}

/* platform_suspend_ops.enter: wfi for standby, full cpu_suspend cycle for mem */
static int m10v_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		asm("wfi");
		break;
	case PM_SUSPEND_MEM:
		cpu_pm_enter();
		cpu_suspend(0, m10v_die);
		cpu_pm_exit();
		break;
	}
	return 0;
}

static const struct platform_suspend_ops m10v_pm_ops = {
	.valid		= m10v_pm_valid,
	.enter		= m10v_pm_enter,
};

/* NOTE(review): forward declaration only — not referenced in this file */
struct clk *m10v_clclk_register(struct device *cpu_dev);

/* Install the suspend ops only on the Milbeaut EVB board */
static int __init m10v_pm_init(void)
{
	if (of_machine_is_compatible("socionext,milbeaut-evb"))
		suspend_set_ops(&m10v_pm_ops);

	return 0;
}
late_initcall(m10v_pm_init);
linux-master
arch/arm/mach-milbeaut/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/plat-spear/pl080.c * * DMAC pl080 definitions for SPEAr platform * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <[email protected]> */ #include <linux/amba/pl08x.h> #include <linux/amba/bus.h> #include <linux/bug.h> #include <linux/err.h> #include <linux/io.h> #include <linux/spinlock_types.h> #include "spear.h" #include "misc_regs.h" #include "pl080.h" static spinlock_t lock = __SPIN_LOCK_UNLOCKED(x); struct { unsigned char busy; unsigned char val; } signals[16] = {{0, 0}, }; int pl080_get_signal(const struct pl08x_channel_data *cd) { unsigned int signal = cd->min_signal, val; unsigned long flags; spin_lock_irqsave(&lock, flags); /* Return if signal is already acquired by somebody else */ if (signals[signal].busy && (signals[signal].val != cd->muxval)) { spin_unlock_irqrestore(&lock, flags); return -EBUSY; } /* If acquiring for the first time, configure it */ if (!signals[signal].busy) { val = readl(DMA_CHN_CFG); /* * Each request line has two bits in DMA_CHN_CFG register. To * goto the bits of current request line, do left shift of * value by 2 * signal number. */ val &= ~(0x3 << (signal * 2)); val |= cd->muxval << (signal * 2); writel(val, DMA_CHN_CFG); } signals[signal].busy++; signals[signal].val = cd->muxval; spin_unlock_irqrestore(&lock, flags); return signal; } void pl080_put_signal(const struct pl08x_channel_data *cd, int signal) { unsigned long flags; spin_lock_irqsave(&lock, flags); /* if signal is not used */ if (!signals[signal].busy) BUG(); signals[signal].busy--; spin_unlock_irqrestore(&lock, flags); }
linux-master
arch/arm/mach-spear/pl080.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/plat-spear/restart.c * * SPEAr platform specific restart functions * * Copyright (C) 2009 ST Microelectronics * Viresh Kumar <[email protected]> */ #include <linux/io.h> #include <linux/amba/sp810.h> #include <linux/reboot.h> #include <asm/system_misc.h> #include "spear.h" #include "generic.h" #define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204) void spear_restart(enum reboot_mode mode, const char *cmd) { if (mode == REBOOT_SOFT) { /* software reset, Jump into ROM at address 0 */ soft_restart(0); } else { /* hardware reset, Use on-chip reset capability */ #ifdef CONFIG_ARCH_SPEAR13XX writel_relaxed(0x01, SPEAR13XX_SYS_SW_RES); #endif #if defined(CONFIG_ARCH_SPEAR3XX) || defined(CONFIG_ARCH_SPEAR6XX) sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE); #endif } }
linux-master
arch/arm/mach-spear/restart.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-spear13xx/spear13xx.c
 *
 * SPEAr13XX machines common source file
 *
 * Copyright (C) 2012 ST Microelectronics
 * Viresh Kumar <[email protected]>
 */

#define pr_fmt(fmt) "SPEAr13xx: " fmt

#include <linux/amba/pl022.h>
#include <linux/clk.h>
#include <linux/clk/spear.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/of.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
#include "spear.h"
#include "generic.h"

/*
 * Configure and enable the L2C-310 outer cache (no-op when
 * CONFIG_CACHE_L2X0 is disabled).
 */
void __init spear13xx_l2x0_init(void)
{
	/*
	 * 512KB (64KB/way), 8-way associativity, parity supported
	 *
	 * FIXME: 9th bit, of Auxiliary Controller register must be set
	 * for some spear13xx devices for stable L2 operation.
	 *
	 * Enable Early BRESP, L2 prefetch for Instruction and Data,
	 * write alloc and 'Full line of zero' options
	 *
	 */
	if (!IS_ENABLED(CONFIG_CACHE_L2X0))
		return;

	writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL);

	/*
	 * Program following latencies in order to make
	 * SPEAr1340 work at 600 MHz
	 */
	writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL);
	writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL);
	l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff);
}

/*
 * Following will create 16MB static virtual/physical mappings
 * PHYSICAL		VIRTUAL
 * 0xB3000000		0xF9000000
 * 0xE0000000		0xFD000000
 * 0xEC000000		0xFC000000
 * 0xED000000		0xFB000000
 */
static struct map_desc spear13xx_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long)VA_PERIP_GRP2_BASE,
		.pfn		= __phys_to_pfn(PERIP_GRP2_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)VA_PERIP_GRP1_BASE,
		.pfn		= __phys_to_pfn(PERIP_GRP1_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)VA_A9SM_AND_MPMC_BASE,
		.pfn		= __phys_to_pfn(A9SM_AND_MPMC_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	}, {
		/* L2 cache controller only needs a 4K window */
		.virtual	= (unsigned long)VA_L2CC_BASE,
		.pfn		= __phys_to_pfn(L2CC_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
};

/* This will create static memory mapping for selected devices */
void __init spear13xx_map_io(void)
{
	iotable_init(spear13xx_io_desc, ARRAY_SIZE(spear13xx_io_desc));
}

/* Dispatch to the SoC-variant clock init based on the DT compatible */
static void __init spear13xx_clk_init(void)
{
	if (of_machine_is_compatible("st,spear1310"))
		spear1310_clk_init(VA_MISC_BASE, VA_SPEAR1310_RAS_BASE);
	else if (of_machine_is_compatible("st,spear1340"))
		spear1340_clk_init(VA_MISC_BASE);
	else
		pr_err("%s: Unknown machine\n", __func__);
}

/*
 * Timer init: bring up the clock tree, reparent gpt0 to the 24 MHz
 * oscillator, then register the SPEAr GPT and any DT-probed timers.
 * BUG()s out if the required clocks are missing — the system cannot
 * run without a timer.
 */
void __init spear13xx_timer_init(void)
{
	char pclk_name[] = "osc_24m_clk";
	struct clk *gpt_clk, *pclk;

	spear13xx_clk_init();

	/* get the system timer clock */
	gpt_clk = clk_get_sys("gpt0", NULL);
	if (IS_ERR(gpt_clk)) {
		pr_err("%s:couldn't get clk for gpt\n", __func__);
		BUG();
	}

	/* get the suitable parent clock for timer*/
	pclk = clk_get(NULL, pclk_name);
	if (IS_ERR(pclk)) {
		pr_err("%s:couldn't get %s as parent for gpt\n", __func__,
				pclk_name);
		BUG();
	}

	clk_set_parent(gpt_clk, pclk);

	clk_put(gpt_clk);
	clk_put(pclk);

	spear_setup_of_timer();
	timer_probe();
}
linux-master
arch/arm/mach-spear/spear13xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-spear3xx/spear310.c
 *
 * SPEAr310 machine source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <[email protected]>
 */

#define pr_fmt(fmt) "SPEAr310: " fmt

#include <linux/amba/pl08x.h>
#include <linux/amba/serial.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include "generic.h"
#include "spear.h"

/* Physical bases of the five extra RAS UARTs on SPEAr310 */
#define SPEAR310_UART1_BASE		UL(0xB2000000)
#define SPEAR310_UART2_BASE		UL(0xB2080000)
#define SPEAR310_UART3_BASE		UL(0xB2100000)
#define SPEAR310_UART4_BASE		UL(0xB2180000)
#define SPEAR310_UART5_BASE		UL(0xB2200000)

/*
 * DMAC platform data's slave info.
 * muxval 0 = common SoC peripherals, muxval 1 = RAS peripherals;
 * min_signal/max_signal select the pl080 request line for each device.
 */
struct pl08x_channel_data spear310_dma_info[] = {
	{ .bus_id = "uart0_rx", .min_signal = 2, .max_signal = 2, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart3_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart3_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart4_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart4_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart5_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart5_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, .periph_buses = PL08X_AHB1, },
};

/* uart devices plat data: hook each RAS UART up to its pl080 channels */
static struct amba_pl011_data spear310_uart_data[] = {
	{
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "uart1_tx",
		.dma_rx_param = "uart1_rx",
	}, {
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "uart2_tx",
		.dma_rx_param = "uart2_rx",
	}, {
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "uart3_tx",
		.dma_rx_param = "uart3_rx",
	}, {
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "uart4_tx",
		.dma_rx_param = "uart4_rx",
	}, {
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "uart5_tx",
		.dma_rx_param = "uart5_rx",
	},
};

/* Add SPEAr310 auxdata to pass platform data */
static struct of_dev_auxdata spear310_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("arm,pl022", SPEAR3XX_ICM1_SSP_BASE, NULL, &pl022_plat_data),
	OF_DEV_AUXDATA("arm,pl080", SPEAR_ICM3_DMA_BASE, NULL, &pl080_plat_data),
	OF_DEV_AUXDATA("arm,pl011", SPEAR310_UART1_BASE, NULL, &spear310_uart_data[0]),
	OF_DEV_AUXDATA("arm,pl011", SPEAR310_UART2_BASE, NULL, &spear310_uart_data[1]),
	OF_DEV_AUXDATA("arm,pl011", SPEAR310_UART3_BASE, NULL, &spear310_uart_data[2]),
	OF_DEV_AUXDATA("arm,pl011", SPEAR310_UART4_BASE, NULL, &spear310_uart_data[3]),
	OF_DEV_AUXDATA("arm,pl011", SPEAR310_UART5_BASE, NULL, &spear310_uart_data[4]),
	{}
};

/* Attach the board-specific DMA slave table, then populate devices from DT */
static void __init spear310_dt_init(void)
{
	pl080_plat_data.slave_channels = spear310_dma_info;
	pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear310_dma_info);

	of_platform_default_populate(NULL, spear310_auxdata_lookup, NULL);
}

static const char * const spear310_dt_board_compat[] = {
	"st,spear310",
	"st,spear310-evb",
	NULL,
};

/* Reuse the common SPEAr3xx static I/O mappings */
static void __init spear310_map_io(void)
{
	spear3xx_map_io();
}

DT_MACHINE_START(SPEAR310_DT, "ST SPEAr310 SoC with Flattened Device Tree")
	.map_io		=	spear310_map_io,
	.init_time	=	spear3xx_timer_init,
	.init_machine	=	spear310_dt_init,
	.restart	=	spear_restart,
	.dt_compat	=	spear310_dt_board_compat,
MACHINE_END
linux-master
arch/arm/mach-spear/spear310.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-spear3xx/spear300.c
 *
 * SPEAr300 machine source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <[email protected]>
 */

#define pr_fmt(fmt) "SPEAr300: " fmt

#include <linux/amba/pl08x.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include "generic.h"
#include "spear.h"

/*
 * DMAC platform data's slave info.
 * muxval 0 = common SoC peripherals, muxval 1 = RAS peripherals;
 * min_signal/max_signal select the pl080 request line for each device.
 */
struct pl08x_channel_data spear300_dma_info[] = {
	{ .bus_id = "uart0_rx", .min_signal = 2, .max_signal = 2, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, .periph_buses = PL08X_AHB1, },
};

/* Add SPEAr300 auxdata to pass platform data */
static struct of_dev_auxdata spear300_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("arm,pl022", SPEAR3XX_ICM1_SSP_BASE, NULL, &pl022_plat_data),
	OF_DEV_AUXDATA("arm,pl080", SPEAR_ICM3_DMA_BASE, NULL, &pl080_plat_data),
	{}
};

/* Attach the board-specific DMA slave table, then populate devices from DT */
static void __init spear300_dt_init(void)
{
	pl080_plat_data.slave_channels = spear300_dma_info;
	pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear300_dma_info);

	of_platform_default_populate(NULL, spear300_auxdata_lookup, NULL);
}

static const char * const spear300_dt_board_compat[] = {
	"st,spear300",
	"st,spear300-evb",
	NULL,
};

/* Reuse the common SPEAr3xx static I/O mappings */
static void __init spear300_map_io(void)
{
	spear3xx_map_io();
}

DT_MACHINE_START(SPEAR300_DT, "ST SPEAr300 SoC with Flattened Device Tree")
	.map_io		=	spear300_map_io,
	.init_time	=	spear3xx_timer_init,
	.init_machine	=	spear300_dt_init,
	.restart	=	spear_restart,
	.dt_compat	=	spear300_dt_board_compat,
MACHINE_END
linux-master
arch/arm/mach-spear/spear300.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-spear3xx/spear3xx.c
 *
 * SPEAr3XX machines common source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <[email protected]>
 */

#define pr_fmt(fmt) "SPEAr3xx: " fmt

#include <linux/amba/pl022.h>
#include <linux/amba/pl080.h>
#include <linux/clk.h>
#include <linux/clk/spear.h>
#include <linux/io.h>
#include <asm/mach/map.h>
#include "pl080.h"
#include "generic.h"
#include "spear.h"
#include "misc_regs.h"

/* ssp device registration */
/* Shared by the SPEAr3xx board files via their auxdata tables */
struct pl022_ssp_controller pl022_plat_data = {
	.bus_id = 0,
	.enable_dma = 1,
	.dma_filter = pl08x_filter_id,
	.dma_tx_param = "ssp0_tx",
	.dma_rx_param = "ssp0_rx",
};

/* dmac device registration */
/* slave_channels/num_slave_channels are filled in by each board's dt_init */
struct pl08x_platform_data pl080_plat_data = {
	.memcpy_burst_size = PL08X_BURST_SZ_16,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.memcpy_prot_buff = true,
	.memcpy_prot_cache = true,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
	.get_xfer_signal = pl080_get_signal,
	.put_xfer_signal = pl080_put_signal,
};

/*
 * Following will create 16MB static virtual/physical mappings
 * PHYSICAL		VIRTUAL
 * 0xD0000000		0xFD000000
 * 0xFC000000		0xFC000000
 */
struct map_desc spear3xx_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long)VA_SPEAR_ICM1_2_BASE,
		.pfn		= __phys_to_pfn(SPEAR_ICM1_2_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)VA_SPEAR_ICM3_SMI_CTRL_BASE,
		.pfn		= __phys_to_pfn(SPEAR_ICM3_SMI_CTRL_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	},
};

/* This will create static memory mapping for selected devices */
void __init spear3xx_map_io(void)
{
	iotable_init(spear3xx_io_desc, ARRAY_SIZE(spear3xx_io_desc));
}

/*
 * Timer init: bring up the SPEAr3xx clock tree, reparent gpt0 to pll3,
 * then register the SPEAr GPT. BUG()s out if the clocks are missing —
 * the system cannot run without a timer.
 */
void __init spear3xx_timer_init(void)
{
	char pclk_name[] = "pll3_clk";
	struct clk *gpt_clk, *pclk;

	spear3xx_clk_init(MISC_BASE, VA_SPEAR320_SOC_CONFIG_BASE);

	/* get the system timer clock */
	gpt_clk = clk_get_sys("gpt0", NULL);
	if (IS_ERR(gpt_clk)) {
		pr_err("%s:couldn't get clk for gpt\n", __func__);
		BUG();
	}

	/* get the suitable parent clock for timer*/
	pclk = clk_get(NULL, pclk_name);
	if (IS_ERR(pclk)) {
		pr_err("%s:couldn't get %s as parent for gpt\n", __func__,
				pclk_name);
		BUG();
	}

	clk_set_parent(gpt_clk, pclk);

	clk_put(gpt_clk);
	clk_put(pclk);

	spear_setup_of_timer();
}
linux-master
arch/arm/mach-spear/spear3xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/plat-spear/time.c
 *
 * Copyright (C) 2010 ST Microelectronics
 * Shiraz Hashim<[email protected]>
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/time.h>
#include <linux/irq.h>
#include <asm/mach/time.h>
#include "generic.h"

/*
 * We would use TIMER0 and TIMER1 as clockevent and clocksource.
 * Timer0 and Timer1 both belong to same gpt block in cpu subbsystem. Further
 * they share same functional clock. Any change in one's functional clock will
 * also affect other timer.
 */

#define CLKEVT	0	/* gpt0, channel0 as clockevent */
#define CLKSRC	1	/* gpt0, channel1 as clocksource */

/* Register offsets, x is channel number */
#define CR(x)		((x) * 0x80 + 0x80)
#define IR(x)		((x) * 0x80 + 0x84)
#define LOAD(x)	((x) * 0x80 + 0x88)
#define COUNT(x)	((x) * 0x80 + 0x8C)

/* Reg bit definitions */
#define CTRL_INT_ENABLE		0x0100
#define CTRL_ENABLE		0x0020
#define CTRL_ONE_SHOT		0x0010

/*
 * Prescaler field values. Note each value equals log2 of the divisor,
 * so the code below reuses these macros as shift counts on the clock
 * rate (e.g. tick_rate >>= CTRL_PRESCALER256 divides by 256).
 */
#define CTRL_PRESCALER1	0x0
#define CTRL_PRESCALER2	0x1
#define CTRL_PRESCALER4	0x2
#define CTRL_PRESCALER8	0x3
#define CTRL_PRESCALER16	0x4
#define CTRL_PRESCALER32	0x5
#define CTRL_PRESCALER64	0x6
#define CTRL_PRESCALER128	0x7
#define CTRL_PRESCALER256	0x8

#define INT_STATUS		0x1

/*
 * Minimum clocksource/clockevent timer range in seconds
 */
#define SPEAR_MIN_RANGE 4

static __iomem void *gpt_base;
static struct clk *gpt_clk;

static int clockevent_next_event(unsigned long evt,
				 struct clock_event_device *clk_event_dev);

/*
 * Register channel CLKSRC as a free-running 16-bit up-counting
 * mmio clocksource, clocked at fclk/256.
 */
static void __init spear_clocksource_init(void)
{
	u32 tick_rate;
	u16 val;

	/* program the prescaler (/256)*/
	writew(CTRL_PRESCALER256, gpt_base + CR(CLKSRC));

	/* find out actual clock driving Timer */
	tick_rate = clk_get_rate(gpt_clk);
	tick_rate >>= CTRL_PRESCALER256;

	writew(0xFFFF, gpt_base + LOAD(CLKSRC));

	val = readw(gpt_base + CR(CLKSRC));
	val &= ~CTRL_ONE_SHOT;	/* autoreload mode */
	val |= CTRL_ENABLE ;
	writew(val, gpt_base + CR(CLKSRC));

	/* register the clocksource */
	clocksource_mmio_init(gpt_base + COUNT(CLKSRC), "tmr1", tick_rate,
		200, 16, clocksource_mmio_readw_up);
}

/* Stop the clockevent channel by clearing its enable bit */
static inline void spear_timer_shutdown(struct clock_event_device *evt)
{
	u16 val = readw(gpt_base + CR(CLKEVT));

	/* stop the timer */
	val &= ~CTRL_ENABLE;
	writew(val, gpt_base + CR(CLKEVT));
}

static int spear_shutdown(struct clock_event_device *evt)
{
	spear_timer_shutdown(evt);

	return 0;
}

/* Enter one-shot mode; the timer stays stopped until set_next_event */
static int spear_set_oneshot(struct clock_event_device *evt)
{
	u16 val;

	/* stop the timer */
	spear_timer_shutdown(evt);

	val = readw(gpt_base + CR(CLKEVT));
	val |= CTRL_ONE_SHOT;
	writew(val, gpt_base + CR(CLKEVT));

	return 0;
}

/*
 * Enter periodic mode: program the reload value for one tick at HZ
 * (with the /16 prescaler applied) and start with interrupts enabled.
 */
static int spear_set_periodic(struct clock_event_device *evt)
{
	u32 period;
	u16 val;

	/* stop the timer */
	spear_timer_shutdown(evt);

	period = clk_get_rate(gpt_clk) / HZ;
	period >>= CTRL_PRESCALER16;
	writew(period, gpt_base + LOAD(CLKEVT));

	val = readw(gpt_base + CR(CLKEVT));
	val &= ~CTRL_ONE_SHOT;
	val |= CTRL_ENABLE | CTRL_INT_ENABLE;
	writew(val, gpt_base + CR(CLKEVT));

	return 0;
}

static struct clock_event_device clkevt = {
	.name = "tmr0",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown = spear_shutdown,
	.set_state_periodic = spear_set_periodic,
	.set_state_oneshot = spear_set_oneshot,
	.tick_resume = spear_shutdown,
	.set_next_event = clockevent_next_event,
	.shift = 0, /* to be computed */
};

/*
 * Arm the next one-shot expiry: briefly stop the channel if it is
 * running, load the cycle count and restart with interrupts enabled.
 */
static int clockevent_next_event(unsigned long cycles,
				 struct clock_event_device *clk_event_dev)
{
	u16 val = readw(gpt_base + CR(CLKEVT));

	if (val & CTRL_ENABLE)
		writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT));

	writew(cycles, gpt_base + LOAD(CLKEVT));

	val |= CTRL_ENABLE | CTRL_INT_ENABLE;
	writew(val, gpt_base + CR(CLKEVT));

	return 0;
}

/* Ack the channel interrupt and forward the tick to the core */
static irqreturn_t spear_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clkevt;

	writew(INT_STATUS, gpt_base + IR(CLKEVT));

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

/*
 * Register channel CLKEVT as the boot-CPU clockevent (fclk/16) and
 * request its interrupt.
 */
static void __init spear_clockevent_init(int irq)
{
	u32 tick_rate;

	/* program the prescaler */
	writew(CTRL_PRESCALER16, gpt_base + CR(CLKEVT));

	tick_rate = clk_get_rate(gpt_clk);
	tick_rate >>= CTRL_PRESCALER16;

	clkevt.cpumask = cpumask_of(0);

	clockevents_config_and_register(&clkevt, tick_rate, 3, 0xfff0);

	if (request_irq(irq, spear_timer_interrupt, IRQF_TIMER, "timer", NULL))
		pr_err("Failed to request irq %d (timer)\n", irq);
}

static const struct of_device_id timer_of_match[] __initconst = {
	{ .compatible = "st,spear-timer", },
	{ },
};

/*
 * Locate the "st,spear-timer" DT node, map its registers, enable its
 * functional clock and bring up both the clockevent and clocksource
 * channels. Errors are logged and the partially-acquired resources
 * are released; the system is left without this timer in that case.
 */
void __init spear_setup_of_timer(void)
{
	struct device_node *np;
	int irq, ret;

	np = of_find_matching_node(NULL, timer_of_match);
	if (!np) {
		pr_err("%s: No timer passed via DT\n", __func__);
		return;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		pr_err("%s: No irq passed for timer via DT\n", __func__);
		goto err_put_np;
	}

	gpt_base = of_iomap(np, 0);
	if (!gpt_base) {
		pr_err("%s: of iomap failed\n", __func__);
		goto err_put_np;
	}

	gpt_clk = clk_get_sys("gpt0", NULL);
	if (IS_ERR(gpt_clk)) {
		pr_err("%s:couldn't get clk for gpt\n", __func__);
		goto err_iomap;
	}

	ret = clk_prepare_enable(gpt_clk);
	if (ret < 0) {
		pr_err("%s:couldn't prepare-enable gpt clock\n", __func__);
		goto err_prepare_enable_clk;
	}

	of_node_put(np);

	spear_clockevent_init(irq);
	spear_clocksource_init();

	return;

err_prepare_enable_clk:
	clk_put(gpt_clk);
err_iomap:
	iounmap(gpt_base);
err_put_np:
	of_node_put(np);
}
linux-master
arch/arm/mach-spear/time.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-spear6xx/spear6xx.c
 *
 * SPEAr6XX machines common source file
 *
 * Copyright (C) 2009 ST Microelectronics
 * Rajeev Kumar<[email protected]>
 *
 * Copyright 2012 Stefan Roese <[email protected]>
 */

#include <linux/amba/pl08x.h>
#include <linux/clk.h>
#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/amba/pl080.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include "pl080.h"
#include "generic.h"
#include "spear.h"
#include "misc_regs.h"

/* dmac device registration */
/*
 * pl080 slave table for SPEAr600. muxval selects the request-line group:
 * 0 = basic SoC peripherals, 1 = RAS peripherals, 2 = external lines;
 * min_signal/max_signal give the request line within that group.
 */
static struct pl08x_channel_data spear600_dma_info[] = {
	{ .bus_id = "ssp1_rx", .min_signal = 0, .max_signal = 0, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart0_rx", .min_signal = 2, .max_signal = 2, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ssp2_rx", .min_signal = 6, .max_signal = 6, .muxval = 0, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ssp2_tx", .min_signal = 7, .max_signal = 7, .muxval = 0, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, .periph_buses = PL08X_AHB1, },
	{ .bus_id = "ext0_rx", .min_signal = 0, .max_signal = 0, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext0_tx", .min_signal = 1, .max_signal = 1, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext1_rx", .min_signal = 2, .max_signal = 2, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext1_tx", .min_signal = 3, .max_signal = 3, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext2_rx", .min_signal = 4, .max_signal = 4, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext2_tx", .min_signal = 5, .max_signal = 5, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext3_rx", .min_signal = 6, .max_signal = 6, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext3_tx", .min_signal = 7, .max_signal = 7, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext4_rx", .min_signal = 8, .max_signal = 8, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext4_tx", .min_signal = 9, .max_signal = 9, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext5_rx", .min_signal = 10, .max_signal = 10, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext5_tx", .min_signal = 11, .max_signal = 11, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext6_rx", .min_signal = 12, .max_signal = 12, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext6_tx", .min_signal = 13, .max_signal = 13, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext7_rx", .min_signal = 14, .max_signal = 14, .muxval = 2, .periph_buses = PL08X_AHB2, },
	{ .bus_id = "ext7_tx", .min_signal = 15, .max_signal = 15, .muxval = 2, .periph_buses = PL08X_AHB2, },
};

/* pl080 platform data for SPEAr6xx, wired to the shared mux helpers */
static struct pl08x_platform_data spear6xx_pl080_plat_data = {
	.memcpy_burst_size = PL08X_BURST_SZ_16,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.memcpy_prot_buff = true,
	.memcpy_prot_cache = true,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
	.get_xfer_signal = pl080_get_signal,
	.put_xfer_signal = pl080_put_signal,
	.slave_channels = spear600_dma_info,
	.num_slave_channels = ARRAY_SIZE(spear600_dma_info),
};

/*
 * Following will create 16MB static virtual/physical mappings
 * PHYSICAL		VIRTUAL
 * 0xF0000000		0xF0000000
 * 0xF1000000		0xF1000000
 * 0xD0000000		0xFD000000
 * 0xFC000000		0xFC000000
 */
static struct map_desc spear6xx_io_desc[] __initdata = {
	{
		/* 32MB window: covers both ML1 and ML2 regions */
		.virtual	= (unsigned long)VA_SPEAR6XX_ML_CPU_BASE,
		.pfn		= __phys_to_pfn(SPEAR_ICM3_ML1_2_BASE),
		.length		= 2 * SZ_16M,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)VA_SPEAR_ICM1_2_BASE,
		.pfn		= __phys_to_pfn(SPEAR_ICM1_2_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)VA_SPEAR_ICM3_SMI_CTRL_BASE,
		.pfn		= __phys_to_pfn(SPEAR_ICM3_SMI_CTRL_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	},
};

/* This will create static memory mapping for selected devices */
static void __init spear6xx_map_io(void)
{
	iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
}

/*
 * Timer init: bring up the SPEAr6xx clock tree, reparent gpt0 to pll3,
 * then register the SPEAr GPT. BUG()s out if the clocks are missing —
 * the system cannot run without a timer.
 */
static void __init spear6xx_timer_init(void)
{
	char pclk_name[] = "pll3_clk";
	struct clk *gpt_clk, *pclk;

	spear6xx_clk_init(MISC_BASE);

	/* get the system timer clock */
	gpt_clk = clk_get_sys("gpt0", NULL);
	if (IS_ERR(gpt_clk)) {
		pr_err("%s:couldn't get clk for gpt\n", __func__);
		BUG();
	}

	/* get the suitable parent clock for timer*/
	pclk = clk_get(NULL, pclk_name);
	if (IS_ERR(pclk)) {
		pr_err("%s:couldn't get %s as parent for gpt\n", __func__,
				pclk_name);
		BUG();
	}

	clk_set_parent(gpt_clk, pclk);

	clk_put(gpt_clk);
	clk_put(pclk);

	spear_setup_of_timer();
}

/* Add auxdata to pass platform data */
static struct of_dev_auxdata spear6xx_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("arm,pl080", SPEAR_ICM3_DMA_BASE, NULL,
			&spear6xx_pl080_plat_data),
	{}
};

static void __init spear600_dt_init(void)
{
	of_platform_default_populate(NULL, spear6xx_auxdata_lookup, NULL);
}

static const char *spear600_dt_board_compat[] = {
	"st,spear600",
	NULL
};

DT_MACHINE_START(SPEAR600_DT, "ST SPEAr600 (Flattened Device Tree)")
	.map_io		=	spear6xx_map_io,
	.init_time	=	spear6xx_timer_init,
	.init_machine	=	spear600_dt_init,
	.restart	=	spear_restart,
	.dt_compat	=	spear600_dt_board_compat,
MACHINE_END
linux-master
arch/arm/mach-spear/spear6xx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-spear3xx/spear320.c
 *
 * SPEAr320 machine source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <[email protected]>
 */

#define pr_fmt(fmt) "SPEAr320: " fmt

#include <linux/amba/pl022.h>
#include <linux/amba/pl08x.h>
#include <linux/amba/serial.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "generic.h"
#include "spear.h"

/* SPEAr320-specific peripheral physical base addresses */
#define SPEAR320_UART1_BASE		UL(0xA3000000)
#define SPEAR320_UART2_BASE		UL(0xA4000000)
#define SPEAR320_SSP0_BASE		UL(0xA5000000)
#define SPEAR320_SSP1_BASE		UL(0xA6000000)

/* DMAC platform data's slave info */
struct pl08x_channel_data spear320_dma_info[] = {
	/* muxval 0 requests, routed over AHB1 */
	{
		.bus_id = "uart0_rx",
		.min_signal = 2,
		.max_signal = 2,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "uart0_tx",
		.min_signal = 3,
		.max_signal = 3,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "ssp0_rx",
		.min_signal = 8,
		.max_signal = 8,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "ssp0_tx",
		.min_signal = 9,
		.max_signal = 9,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "i2c0_rx",
		.min_signal = 10,
		.max_signal = 10,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "i2c0_tx",
		.min_signal = 11,
		.max_signal = 11,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "irda",
		.min_signal = 12,
		.max_signal = 12,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "adc",
		.min_signal = 13,
		.max_signal = 13,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "to_jpeg",
		.min_signal = 14,
		.max_signal = 14,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	}, {
		.bus_id = "from_jpeg",
		.min_signal = 15,
		.max_signal = 15,
		.muxval = 0,
		.periph_buses = PL08X_AHB1,
	},
	/* muxval 1 requests, routed over AHB2 */
	{
		.bus_id = "ssp1_rx",
		.min_signal = 0,
		.max_signal = 0,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "ssp1_tx",
		.min_signal = 1,
		.max_signal = 1,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "ssp2_rx",
		.min_signal = 2,
		.max_signal = 2,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "ssp2_tx",
		.min_signal = 3,
		.max_signal = 3,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "uart1_rx",
		.min_signal = 4,
		.max_signal = 4,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "uart1_tx",
		.min_signal = 5,
		.max_signal = 5,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "uart2_rx",
		.min_signal = 6,
		.max_signal = 6,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "uart2_tx",
		.min_signal = 7,
		.max_signal = 7,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "i2c1_rx",
		.min_signal = 8,
		.max_signal = 8,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "i2c1_tx",
		.min_signal = 9,
		.max_signal = 9,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "i2c2_rx",
		.min_signal = 10,
		.max_signal = 10,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "i2c2_tx",
		.min_signal = 11,
		.max_signal = 11,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "i2s_rx",
		.min_signal = 12,
		.max_signal = 12,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "i2s_tx",
		.min_signal = 13,
		.max_signal = 13,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "rs485_rx",
		.min_signal = 14,
		.max_signal = 14,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	}, {
		.bus_id = "rs485_tx",
		.min_signal = 15,
		.max_signal = 15,
		.muxval = 1,
		.periph_buses = PL08X_AHB2,
	},
};

/* Per-controller SSP data; DMA request names match the table above */
static struct pl022_ssp_controller spear320_ssp_data[] = {
	{
		/* ssp1 */
		.bus_id = 1,
		.enable_dma = 1,
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "ssp1_tx",
		.dma_rx_param = "ssp1_rx",
	}, {
		/* ssp2 */
		.bus_id = 2,
		.enable_dma = 1,
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "ssp2_tx",
		.dma_rx_param = "ssp2_rx",
	}
};

static struct amba_pl011_data spear320_uart_data[] = {
	{
		/* uart1 */
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "uart1_tx",
		.dma_rx_param = "uart1_rx",
	}, {
		/* uart2 */
		.dma_filter = pl08x_filter_id,
		.dma_tx_param = "uart2_tx",
		.dma_rx_param = "uart2_rx",
	},
};

/* Add SPEAr320 auxdata to pass platform data */
static struct of_dev_auxdata spear320_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("arm,pl022", SPEAR3XX_ICM1_SSP_BASE, NULL,
			&pl022_plat_data),
	OF_DEV_AUXDATA("arm,pl080", SPEAR_ICM3_DMA_BASE, NULL,
			&pl080_plat_data),
	OF_DEV_AUXDATA("arm,pl022", SPEAR320_SSP0_BASE, NULL,
			&spear320_ssp_data[0]),
	OF_DEV_AUXDATA("arm,pl022", SPEAR320_SSP1_BASE, NULL,
			&spear320_ssp_data[1]),
	OF_DEV_AUXDATA("arm,pl011", SPEAR320_UART1_BASE, NULL,
			&spear320_uart_data[0]),
	OF_DEV_AUXDATA("arm,pl011", SPEAR320_UART2_BASE, NULL,
			&spear320_uart_data[1]),
	{}
};

static void __init spear320_dt_init(void)
{
	/*
	 * Hook the SPEAr320-specific DMA slave table into the shared
	 * pl080 platform data before devices are populated.
	 */
	pl080_plat_data.slave_channels = spear320_dma_info;
	pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear320_dma_info);

	of_platform_default_populate(NULL, spear320_auxdata_lookup, NULL);
}

static const char * const spear320_dt_board_compat[] = {
	"st,spear320",
	"st,spear320-evb",
	"st,spear320-hmi",
	NULL,
};

struct map_desc spear320_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long)VA_SPEAR320_SOC_CONFIG_BASE,
		.pfn		= __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
		.length		= SZ_16M,
		.type		= MT_DEVICE
	},
};

/* Map SPEAr320-specific regions, then the common SPEAr3xx ones */
static void __init spear320_map_io(void)
{
	iotable_init(spear320_io_desc, ARRAY_SIZE(spear320_io_desc));
	spear3xx_map_io();
}

DT_MACHINE_START(SPEAR320_DT, "ST SPEAr320 SoC with Flattened Device Tree")
	.map_io		=	spear320_map_io,
	.init_time	=	spear3xx_timer_init,
	.init_machine	=	spear320_dt_init,
	.restart	=	spear_restart,
	.dt_compat	=	spear320_dt_board_compat,
MACHINE_END
linux-master
arch/arm/mach-spear/spear320.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-spear13xx/spear1340.c * * SPEAr1340 machine source file * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <[email protected]> */ #define pr_fmt(fmt) "SPEAr1340: " fmt #include <linux/platform_device.h> #include <asm/mach/arch.h> #include "generic.h" static void __init spear1340_dt_init(void) { platform_device_register_simple("spear-cpufreq", -1, NULL, 0); } static const char * const spear1340_dt_board_compat[] = { "st,spear1340", "st,spear1340-evb", NULL, }; DT_MACHINE_START(SPEAR1340_DT, "ST SPEAr1340 SoC with Flattened Device Tree") .smp = smp_ops(spear13xx_smp_ops), .map_io = spear13xx_map_io, .init_time = spear13xx_timer_init, .init_machine = spear1340_dt_init, .restart = spear_restart, .dt_compat = spear1340_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-spear/spear1340.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-spear13xx/platsmp.c
 *
 * based upon linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2012 ST Microelectronics Ltd.
 * Shiraz Hashim <[email protected]>
 */

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include "spear.h"
#include "generic.h"

/* XXX spear_pen_release is cargo culted code - DO NOT COPY XXX */
volatile int spear_pen_release = -1;

/*
 * XXX CARGO CULTED CODE - DO NOT COPY XXX
 *
 * Write spear_pen_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void spear_write_pen_release(int val)
{
	spear_pen_release = val;
	smp_wmb();
	/* push the write past the cache so non-coherent CPUs see it */
	sync_cache_w(&spear_pen_release);
}

/* Serializes primary/secondary CPU bring-up handshaking */
static DEFINE_SPINLOCK(boot_lock);

static void __iomem *scu_base = IOMEM(VA_SCU_BASE);

/* Runs on the secondary CPU once it leaves the holding pen */
static void spear13xx_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	spear_write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

/*
 * Runs on the boot CPU: release @cpu from the holding pen and wait up
 * to one second for it to acknowledge by resetting spear_pen_release.
 * Returns 0 on success, -ENOSYS if the secondary never responded.
 */
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting spear_pen_release.
	 *
	 * Note that "spear_pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	spear_write_pen_release(cpu);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (spear_pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return spear_pen_release != -1 ? -ENOSYS : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init spear13xx_smp_init_cpus(void)
{
	unsigned int i, ncores = scu_get_core_count(scu_base);

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init spear13xx_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(scu_base);

	/*
	 * Write the address of secondary startup into the system-wide location
	 * (presently it is in SRAM). The BootMonitor waits until it receives a
	 * soft interrupt, and then the secondary CPU branches to this address.
	 */
	__raw_writel(__pa_symbol(spear13xx_secondary_startup), SYS_LOCATION);
}

const struct smp_operations spear13xx_smp_ops __initconst = {
	.smp_init_cpus		= spear13xx_smp_init_cpus,
	.smp_prepare_cpus	= spear13xx_smp_prepare_cpus,
	.smp_secondary_init	= spear13xx_secondary_init,
	.smp_boot_secondary	= spear13xx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= spear13xx_cpu_die,
#endif
};
linux-master
arch/arm/mach-spear/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/mach-spear13xx/hotplug.c * * Copyright (C) 2012 ST Microelectronics Ltd. * Deepak Sikri <[email protected]> * * based upon linux/arch/arm/mach-realview/hotplug.c */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cp15.h> #include <asm/smp_plat.h> #include "generic.h" static inline void cpu_enter_lowpower(void) { unsigned int v; asm volatile( " mcr p15, 0, %1, c7, c5, 0\n" " dsb\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C) : "cc", "memory"); } static inline void cpu_leave_lowpower(void) { unsigned int v; asm volatile("mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C) : "cc"); } static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious) { for (;;) { wfi(); if (spear_pen_release == cpu) { /* * OK, proper wakeup, we're done */ break; } /* * Getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. */ (*spurious)++; } } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void spear13xx_cpu_die(unsigned int cpu) { int spurious = 0; /* * we're ready for shutdown now, so do it */ cpu_enter_lowpower(); spear13xx_do_lowpower(cpu, &spurious); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); }
linux-master
arch/arm/mach-spear/hotplug.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/mach-spear13xx/spear1310.c * * SPEAr1310 machine source file * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <[email protected]> */ #define pr_fmt(fmt) "SPEAr1310: " fmt #include <linux/amba/pl022.h> #include <linux/pata_arasan_cf_data.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include "generic.h" #include "spear.h" /* Base addresses */ #define SPEAR1310_RAS_GRP1_BASE UL(0xD8000000) #define VA_SPEAR1310_RAS_GRP1_BASE UL(0xFA000000) static void __init spear1310_dt_init(void) { platform_device_register_simple("spear-cpufreq", -1, NULL, 0); } static const char * const spear1310_dt_board_compat[] = { "st,spear1310", "st,spear1310-evb", NULL, }; /* * Following will create 16MB static virtual/physical mappings * PHYSICAL VIRTUAL * 0xD8000000 0xFA000000 */ static struct map_desc spear1310_io_desc[] __initdata = { { .virtual = VA_SPEAR1310_RAS_GRP1_BASE, .pfn = __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE), .length = SZ_16M, .type = MT_DEVICE }, }; static void __init spear1310_map_io(void) { iotable_init(spear1310_io_desc, ARRAY_SIZE(spear1310_io_desc)); spear13xx_map_io(); } DT_MACHINE_START(SPEAR1310_DT, "ST SPEAr1310 SoC with Flattened Device Tree") .smp = smp_ops(spear13xx_smp_ops), .map_io = spear1310_map_io, .init_time = spear13xx_timer_init, .init_machine = spear1310_dt_init, .restart = spear_restart, .dt_compat = spear1310_dt_board_compat, MACHINE_END
linux-master
arch/arm/mach-spear/spear1310.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010-2011 Calxeda, Inc.
 */
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clocksource.h>
#include <linux/dma-map-ops.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/pl320-ipc.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/psci.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "core.h"
#include "sysregs.h"

/* System registers (calxeda,hb-sregs); mapped in highbank_init() */
void __iomem *sregs_base;
/* Snoop Control Unit; only mapped on Cortex-A9 based parts */
void __iomem *scu_base_addr;

/* Read the SCU physical base from CP15 (CBAR) and map it */
static void __init highbank_scu_map_io(void)
{
	unsigned long base;

	/* Get SCU base */
	asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (base));

	scu_base_addr = ioremap(base, SZ_4K);
}

/*
 * L2C-310 secure register writes must go through the secure monitor;
 * only the control register write is forwarded, anything else is
 * dropped with a one-time warning.
 */
static void highbank_l2c310_write_sec(unsigned long val, unsigned reg)
{
	if (reg == L2X0_CTRL)
		highbank_smc1(0x102, val);
	else
		WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n",
			  reg);
}

static void __init highbank_init_irq(void)
{
	irqchip_init();

	/* The SCU exists only on the Cortex-A9 variant */
	if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9"))
		highbank_scu_map_io();
}

static void highbank_power_off(void)
{
	highbank_set_pwr_shutdown();

	while (1)
		cpu_do_idle();
}

/*
 * Bus notifier run as devices are added: for known DMA masters whose
 * DT node is marked "dma-coherent", enable coherent DMA for them via
 * the per-device sregs register at offset @reg (-1 = not one of ours).
 */
static int highbank_platform_notifier(struct notifier_block *nb,
				  unsigned long event, void *__dev)
{
	struct resource *res;
	int reg = -1;
	u32 val;
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;

	if (of_device_is_compatible(dev->of_node, "calxeda,hb-ahci"))
		reg = 0xc;
	else if (of_device_is_compatible(dev->of_node, "calxeda,hb-sdhci"))
		reg = 0x18;
	else if (of_device_is_compatible(dev->of_node, "arm,pl330"))
		reg = 0x20;
	else if (of_device_is_compatible(dev->of_node, "calxeda,hb-xgmac")) {
		/* Two XGMAC instances; tell them apart by MMIO base */
		res = platform_get_resource(to_platform_device(dev),
					    IORESOURCE_MEM, 0);
		if (res) {
			if (res->start == 0xfff50000)
				reg = 0;
			else if (res->start == 0xfff51000)
				reg = 4;
		}
	}

	if (reg < 0)
		return NOTIFY_DONE;

	if (of_property_read_bool(dev->of_node, "dma-coherent")) {
		/*
		 * NOTE(review): 0xff01 presumably sets the coherence-enable
		 * bits for this device; exact bit meanings come from the
		 * Calxeda sysregs spec, not visible here - confirm there.
		 */
		val = readl(sregs_base + reg);
		writel(val | 0xff01, sregs_base + reg);
		dev->dma_coherent = true;
	}
	return NOTIFY_OK;
}

static struct notifier_block highbank_amba_nb = {
	.notifier_call = highbank_platform_notifier,
};

static struct notifier_block highbank_platform_nb = {
	.notifier_call = highbank_platform_notifier,
};

static struct platform_device highbank_cpuidle_device = {
	.name = "cpuidle-calxeda",
};

/*
 * Key events delivered over the PL320 IPC mailbox: KEY_POWER triggers
 * an orderly poweroff, 0xffff triggers ctrl-alt-del. Other events
 * (event != 0x1000) are ignored.
 */
static int hb_keys_notifier(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	u32 key = *(u32 *)data;

	if (event != 0x1000)
		return 0;

	if (key == KEY_POWER)
		orderly_poweroff(false);
	else if (key == 0xffff)
		ctrl_alt_del();

	return 0;
}

static struct notifier_block hb_keys_nb = {
	.notifier_call = hb_keys_notifier,
};

static void __init highbank_init(void)
{
	struct device_node *np;

	/* Map system registers */
	np = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
	sregs_base = of_iomap(np, 0);
	WARN_ON(!sregs_base);

	pm_power_off = highbank_power_off;
	highbank_pm_init();

	/* Catch both platform and AMBA devices as they are added */
	bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
	bus_register_notifier(&amba_bustype, &highbank_amba_nb);

	pl320_ipc_register_notifier(&hb_keys_nb);

	/* cpuidle needs PSCI CPU_SUSPEND support in firmware */
	if (psci_ops.cpu_suspend)
		platform_device_register(&highbank_cpuidle_device);
}

static const char *const highbank_match[] __initconst = {
	"calxeda,highbank",
	"calxeda,ecx-2000",
	NULL,
};

DT_MACHINE_START(HIGHBANK, "Highbank")
#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
	.dma_zone_size	= (4ULL * SZ_1G),
#endif
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.l2c_write_sec	= highbank_l2c310_write_sec,
	.init_irq	= highbank_init_irq,
	.init_machine	= highbank_init,
	.dt_compat	= highbank_match,
	.restart	= highbank_restart,
MACHINE_END
linux-master
arch/arm/mach-highbank/highbank.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2011 Calxeda, Inc. */ #include <linux/io.h> #include <asm/proc-fns.h> #include <linux/reboot.h> #include "core.h" #include "sysregs.h" void highbank_restart(enum reboot_mode mode, const char *cmd) { if (mode == REBOOT_HARD) highbank_set_pwr_hard_reset(); else highbank_set_pwr_soft_reset(); while (1) cpu_do_idle(); }
linux-master
arch/arm/mach-highbank/system.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2011 Calxeda, Inc. */ #include <linux/cpu_pm.h> #include <linux/init.h> #include <linux/psci.h> #include <linux/suspend.h> #include <asm/suspend.h> #include <uapi/linux/psci.h> #include "core.h" #define HIGHBANK_SUSPEND_PARAM \ ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int highbank_suspend_finish(unsigned long val) { return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume)); } static int highbank_pm_enter(suspend_state_t state) { cpu_pm_enter(); cpu_cluster_pm_enter(); cpu_suspend(0, highbank_suspend_finish); cpu_cluster_pm_exit(); cpu_pm_exit(); return 0; } static const struct platform_suspend_ops highbank_pm_ops = { .enter = highbank_pm_enter, .valid = suspend_valid_only_mem, }; void __init highbank_pm_init(void) { if (!psci_ops.cpu_suspend) return; suspend_set_ops(&highbank_pm_ops); }
linux-master
arch/arm/mach-highbank/pm.c