python_code | repo_name | file_path
---|---|---
// SPDX-License-Identifier: GPL-2.0-only
/*
* The R_INTC in Allwinner A31 and newer SoCs manages several types of
* interrupts, as shown below:
*
* NMI IRQ DIRECT IRQs MUXED IRQs
* bit 0 bits 1-15^ bits 19-31
*
* +---------+ +---------+ +---------+ +---------+
* | NMI Pad | | IRQ d | | IRQ m | | IRQ m+7 |
* +---------+ +---------+ +---------+ +---------+
* | | | | | | |
* | | | | |......| |
* +------V------+ +------------+ | | | +--V------V--+ |
* | Invert/ | | Write 1 to | | | | | AND with | |
* | Edge Detect | | PENDING[0] | | | | | MUX[m/8] | |
* +-------------+ +------------+ | | | +------------+ |
* | | | | | | |
* +--V-------V--+ +--V--+ | +--V--+ | +--V--+
* | Set Reset| | GIC | | | GIC | | | GIC |
* | Latch | | SPI | | | SPI |... | ...| SPI |
* +-------------+ | N+d | | | m | | | m+7 |
* | | +-----+ | +-----+ | +-----+
* | | | |
* +-------V-+ +-V----------+ +---------V--+ +--------V--------+
* | GIC SPI | | AND with | | AND with | | AND with |
* | N (=32) | | ENABLE[0] | | ENABLE[d] | | ENABLE[19+m/8] |
* +---------+ +------------+ +------------+ +-----------------+
* | | |
* +------V-----+ +------V-----+ +--------V--------+
* | Read | | Read | | Read |
* | PENDING[0] | | PENDING[d] | | PENDING[19+m/8] |
* +------------+ +------------+ +-----------------+
*
* ^ bits 16-18 are direct IRQs for peripherals with banked interrupts, such as
* the MSGBOX. These IRQs do not map to any GIC SPI.
*
* The H6 variant adds two more (banked) direct IRQs and implements the full
* set of 128 mux bits. This requires a second set of top-level registers.
*/
#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define SUN6I_NMI_CTRL (0x0c)
#define SUN6I_IRQ_PENDING(n) (0x10 + 4 * (n))
#define SUN6I_IRQ_ENABLE(n) (0x40 + 4 * (n))
#define SUN6I_MUX_ENABLE(n) (0xc0 + 4 * (n))
#define SUN6I_NMI_SRC_TYPE_LEVEL_LOW 0
#define SUN6I_NMI_SRC_TYPE_EDGE_FALLING 1
#define SUN6I_NMI_SRC_TYPE_LEVEL_HIGH 2
#define SUN6I_NMI_SRC_TYPE_EDGE_RISING 3
#define SUN6I_NMI_BIT BIT(0)
#define SUN6I_NMI_NEEDS_ACK ((void *)1)
#define SUN6I_NR_TOP_LEVEL_IRQS 64
#define SUN6I_NR_DIRECT_IRQS 16
#define SUN6I_NR_MUX_BITS 128
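/*
* Illustrative reading of the diagram and register macros above (not taken
* from a datasheet, and the set of muxed SPIs is SoC-specific): a muxed
* interrupt routed to GIC SPI m has its mux enable in bit (m % 32) of
* SUN6I_MUX_ENABLE(m / 32), and its group of eight SPIs is summarised in
* top-level bit 19 + m / 8 on the A31. For example, SPI 60 would be gated by
* SUN6I_MUX_ENABLE(1) bit 28 and reported through bit 26 of
* SUN6I_IRQ_ENABLE(0) and SUN6I_IRQ_PENDING(0).
*/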
struct sun6i_r_intc_variant {
u32 first_mux_irq;
u32 nr_mux_irqs;
u32 mux_valid[BITS_TO_U32(SUN6I_NR_MUX_BITS)];
};
static void __iomem *base;
static irq_hw_number_t nmi_hwirq;
static DECLARE_BITMAP(wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS);
static DECLARE_BITMAP(wake_mux_enabled, SUN6I_NR_MUX_BITS);
static DECLARE_BITMAP(wake_mux_valid, SUN6I_NR_MUX_BITS);
static void sun6i_r_intc_ack_nmi(void)
{
writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_PENDING(0));
}
static void sun6i_r_intc_nmi_ack(struct irq_data *data)
{
if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
sun6i_r_intc_ack_nmi();
else
data->chip_data = SUN6I_NMI_NEEDS_ACK;
}
static void sun6i_r_intc_nmi_eoi(struct irq_data *data)
{
/* For oneshot IRQs, delay the ack until the IRQ is unmasked. */
if (data->chip_data == SUN6I_NMI_NEEDS_ACK && !irqd_irq_masked(data)) {
data->chip_data = NULL;
sun6i_r_intc_ack_nmi();
}
irq_chip_eoi_parent(data);
}
static void sun6i_r_intc_nmi_unmask(struct irq_data *data)
{
if (data->chip_data == SUN6I_NMI_NEEDS_ACK) {
data->chip_data = NULL;
sun6i_r_intc_ack_nmi();
}
irq_chip_unmask_parent(data);
}
static int sun6i_r_intc_nmi_set_type(struct irq_data *data, unsigned int type)
{
u32 nmi_src_type;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
nmi_src_type = SUN6I_NMI_SRC_TYPE_EDGE_FALLING;
break;
case IRQ_TYPE_LEVEL_HIGH:
nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_HIGH;
break;
case IRQ_TYPE_LEVEL_LOW:
nmi_src_type = SUN6I_NMI_SRC_TYPE_LEVEL_LOW;
break;
default:
return -EINVAL;
}
writel_relaxed(nmi_src_type, base + SUN6I_NMI_CTRL);
/*
* The "External NMI" GIC input connects to a latch inside R_INTC, not
* directly to the pin. So the GIC trigger type does not depend on the
* NMI pin trigger type.
*/
return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH);
}
static int sun6i_r_intc_nmi_set_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which,
bool state)
{
if (which == IRQCHIP_STATE_PENDING && !state)
sun6i_r_intc_ack_nmi();
return irq_chip_set_parent_state(data, which, state);
}
static int sun6i_r_intc_irq_set_wake(struct irq_data *data, unsigned int on)
{
unsigned long offset_from_nmi = data->hwirq - nmi_hwirq;
if (offset_from_nmi < SUN6I_NR_DIRECT_IRQS)
assign_bit(offset_from_nmi, wake_irq_enabled, on);
else if (test_bit(data->hwirq, wake_mux_valid))
assign_bit(data->hwirq, wake_mux_enabled, on);
else
/* Not wakeup capable. */
return -EPERM;
return 0;
}
static struct irq_chip sun6i_r_intc_nmi_chip = {
.name = "sun6i-r-intc",
.irq_ack = sun6i_r_intc_nmi_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = sun6i_r_intc_nmi_unmask,
.irq_eoi = sun6i_r_intc_nmi_eoi,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_type = sun6i_r_intc_nmi_set_type,
.irq_set_irqchip_state = sun6i_r_intc_nmi_set_irqchip_state,
.irq_set_wake = sun6i_r_intc_irq_set_wake,
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static struct irq_chip sun6i_r_intc_wakeup_chip = {
.name = "sun6i-r-intc",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_type = irq_chip_set_type_parent,
.irq_set_wake = sun6i_r_intc_irq_set_wake,
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static int sun6i_r_intc_domain_translate(struct irq_domain *domain,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
unsigned int *type)
{
/* Accept the old two-cell binding for the NMI only. */
if (fwspec->param_count == 2 && fwspec->param[0] == 0) {
*hwirq = nmi_hwirq;
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
/* Otherwise this binding should match the GIC SPI binding. */
if (fwspec->param_count < 3)
return -EINVAL;
if (fwspec->param[0] != GIC_SPI)
return -EINVAL;
*hwirq = fwspec->param[1];
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
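/*
* Illustrative only (fragments invented for the example, not copied from a
* board DTS): a consumer of the NMI can use either form accepted above:
*
*   interrupts-extended = <&r_intc 0 IRQ_TYPE_LEVEL_LOW>;
*
* with the old two-cell binding, or, assuming the NMI is wired to GIC SPI 32
* as in the diagram at the top of this file,
*
*   interrupts-extended = <&r_intc GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
*/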
static int sun6i_r_intc_domain_alloc(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_fwspec *fwspec = arg;
struct irq_fwspec gic_fwspec;
unsigned long hwirq;
unsigned int type;
int i, ret;
ret = sun6i_r_intc_domain_translate(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
if (hwirq + nr_irqs > SUN6I_NR_MUX_BITS)
return -EINVAL;
/* Construct a GIC-compatible fwspec from this fwspec. */
gic_fwspec = (struct irq_fwspec) {
.fwnode = domain->parent->fwnode,
.param_count = 3,
.param = { GIC_SPI, hwirq, type },
};
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec);
if (ret)
return ret;
for (i = 0; i < nr_irqs; ++i, ++hwirq, ++virq) {
if (hwirq == nmi_hwirq) {
irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&sun6i_r_intc_nmi_chip,
NULL);
irq_set_handler(virq, handle_fasteoi_ack_irq);
} else {
irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&sun6i_r_intc_wakeup_chip,
NULL);
}
}
return 0;
}
static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
.translate = sun6i_r_intc_domain_translate,
.alloc = sun6i_r_intc_domain_alloc,
.free = irq_domain_free_irqs_common,
};
static int sun6i_r_intc_suspend(void)
{
u32 buf[BITS_TO_U32(max(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
int i;
/* Wake IRQs are enabled during system sleep and shutdown. */
bitmap_to_arr32(buf, wake_irq_enabled, SUN6I_NR_TOP_LEVEL_IRQS);
for (i = 0; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i)
writel_relaxed(buf[i], base + SUN6I_IRQ_ENABLE(i));
bitmap_to_arr32(buf, wake_mux_enabled, SUN6I_NR_MUX_BITS);
for (i = 0; i < BITS_TO_U32(SUN6I_NR_MUX_BITS); ++i)
writel_relaxed(buf[i], base + SUN6I_MUX_ENABLE(i));
return 0;
}
static void sun6i_r_intc_resume(void)
{
int i;
/* Only the NMI is relevant during normal operation. */
writel_relaxed(SUN6I_NMI_BIT, base + SUN6I_IRQ_ENABLE(0));
for (i = 1; i < BITS_TO_U32(SUN6I_NR_TOP_LEVEL_IRQS); ++i)
writel_relaxed(0, base + SUN6I_IRQ_ENABLE(i));
}
static void sun6i_r_intc_shutdown(void)
{
sun6i_r_intc_suspend();
}
static struct syscore_ops sun6i_r_intc_syscore_ops = {
.suspend = sun6i_r_intc_suspend,
.resume = sun6i_r_intc_resume,
.shutdown = sun6i_r_intc_shutdown,
};
static int __init sun6i_r_intc_init(struct device_node *node,
struct device_node *parent,
const struct sun6i_r_intc_variant *v)
{
struct irq_domain *domain, *parent_domain;
struct of_phandle_args nmi_parent;
int ret;
/* Extract the NMI hwirq number from the OF node. */
ret = of_irq_parse_one(node, 0, &nmi_parent);
if (ret)
return ret;
if (nmi_parent.args_count < 3 ||
nmi_parent.args[0] != GIC_SPI ||
nmi_parent.args[2] != IRQ_TYPE_LEVEL_HIGH)
return -EINVAL;
nmi_hwirq = nmi_parent.args[1];
bitmap_set(wake_irq_enabled, v->first_mux_irq, v->nr_mux_irqs);
bitmap_from_arr32(wake_mux_valid, v->mux_valid, SUN6I_NR_MUX_BITS);
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("%pOF: Failed to obtain parent domain\n", node);
return -ENXIO;
}
base = of_io_request_and_map(node, 0, NULL);
if (IS_ERR(base)) {
pr_err("%pOF: Failed to map MMIO region\n", node);
return PTR_ERR(base);
}
domain = irq_domain_add_hierarchy(parent_domain, 0, 0, node,
&sun6i_r_intc_domain_ops, NULL);
if (!domain) {
pr_err("%pOF: Failed to allocate domain\n", node);
iounmap(base);
return -ENOMEM;
}
register_syscore_ops(&sun6i_r_intc_syscore_ops);
sun6i_r_intc_ack_nmi();
sun6i_r_intc_resume();
return 0;
}
static const struct sun6i_r_intc_variant sun6i_a31_r_intc_variant __initconst = {
.first_mux_irq = 19,
.nr_mux_irqs = 13,
.mux_valid = { 0xffffffff, 0xfff80000, 0xffffffff, 0x0000000f },
};
static int __init sun6i_a31_r_intc_init(struct device_node *node,
struct device_node *parent)
{
return sun6i_r_intc_init(node, parent, &sun6i_a31_r_intc_variant);
}
IRQCHIP_DECLARE(sun6i_a31_r_intc, "allwinner,sun6i-a31-r-intc", sun6i_a31_r_intc_init);
static const struct sun6i_r_intc_variant sun50i_h6_r_intc_variant __initconst = {
.first_mux_irq = 21,
.nr_mux_irqs = 16,
.mux_valid = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff },
};
static int __init sun50i_h6_r_intc_init(struct device_node *node,
struct device_node *parent)
{
return sun6i_r_intc_init(node, parent, &sun50i_h6_r_intc_variant);
}
IRQCHIP_DECLARE(sun50i_h6_r_intc, "allwinner,sun50i-h6-r-intc", sun50i_h6_r_intc_init);
| linux-master | drivers/irqchip/irq-sun6i-r.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*/
#include <linux/bitfield.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
/* FIC Registers */
#define AL_FIC_CAUSE 0x00
#define AL_FIC_SET_CAUSE 0x08
#define AL_FIC_MASK 0x10
#define AL_FIC_CONTROL 0x28
#define CONTROL_TRIGGER_RISING BIT(3)
#define CONTROL_MASK_MSI_X BIT(5)
#define NR_FIC_IRQS 32
MODULE_AUTHOR("Talel Shenhar");
MODULE_DESCRIPTION("Amazon's Annapurna Labs Interrupt Controller Driver");
enum al_fic_state {
AL_FIC_UNCONFIGURED = 0,
AL_FIC_CONFIGURED_LEVEL,
AL_FIC_CONFIGURED_RISING_EDGE,
};
struct al_fic {
void __iomem *base;
struct irq_domain *domain;
const char *name;
unsigned int parent_irq;
enum al_fic_state state;
};
static void al_fic_set_trigger(struct al_fic *fic,
struct irq_chip_generic *gc,
enum al_fic_state new_state)
{
irq_flow_handler_t handler;
u32 control = readl_relaxed(fic->base + AL_FIC_CONTROL);
if (new_state == AL_FIC_CONFIGURED_LEVEL) {
handler = handle_level_irq;
control &= ~CONTROL_TRIGGER_RISING;
} else {
handler = handle_edge_irq;
control |= CONTROL_TRIGGER_RISING;
}
gc->chip_types->handler = handler;
fic->state = new_state;
writel_relaxed(control, fic->base + AL_FIC_CONTROL);
}
static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct al_fic *fic = gc->private;
enum al_fic_state new_state;
int ret = 0;
irq_gc_lock(gc);
if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) &&
((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) {
pr_debug("fic doesn't support flow type %d\n", flow_type);
ret = -EINVAL;
goto err;
}
new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ?
AL_FIC_CONFIGURED_LEVEL : AL_FIC_CONFIGURED_RISING_EDGE;
/*
* A given FIC instance can be either all level or all edge triggered.
* This is generally fixed depending on what pieces of HW it's wired up
* to.
*
* We configure it based on the sensitivity of the first source
* being set up, and reject any subsequent attempt at configuring it in a
* different way.
*/
if (fic->state == AL_FIC_UNCONFIGURED) {
al_fic_set_trigger(fic, gc, new_state);
} else if (fic->state != new_state) {
pr_debug("fic %s state already configured to %d\n",
fic->name, fic->state);
ret = -EINVAL;
goto err;
}
err:
irq_gc_unlock(gc);
return ret;
}
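/*
* A concrete consequence of the first-request-wins policy above (example
* types chosen for illustration): if the first source on a FIC instance
* requests IRQ_TYPE_LEVEL_HIGH, the instance is latched into level mode and
* a later IRQ_TYPE_EDGE_RISING request on any of its sources fails with
* -EINVAL.
*/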
static void al_fic_irq_handler(struct irq_desc *desc)
{
struct al_fic *fic = irq_desc_get_handler_data(desc);
struct irq_domain *domain = fic->domain;
struct irq_chip *irqchip = irq_desc_get_chip(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
unsigned long pending;
u32 hwirq;
chained_irq_enter(irqchip, desc);
pending = readl_relaxed(fic->base + AL_FIC_CAUSE);
pending &= ~gc->mask_cache;
for_each_set_bit(hwirq, &pending, NR_FIC_IRQS)
generic_handle_domain_irq(domain, hwirq);
chained_irq_exit(irqchip, desc);
}
static int al_fic_irq_retrigger(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct al_fic *fic = gc->private;
writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
return 1;
}
static int al_fic_register(struct device_node *node,
struct al_fic *fic)
{
struct irq_chip_generic *gc;
int ret;
fic->domain = irq_domain_add_linear(node,
NR_FIC_IRQS,
&irq_generic_chip_ops,
fic);
if (!fic->domain) {
pr_err("fail to add irq domain\n");
return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(fic->domain,
NR_FIC_IRQS,
1, fic->name,
handle_level_irq,
0, 0, IRQ_GC_INIT_MASK_CACHE);
if (ret) {
pr_err("fail to allocate generic chip (%d)\n", ret);
goto err_domain_remove;
}
gc = irq_get_domain_generic_chip(fic->domain, 0);
gc->reg_base = fic->base;
gc->chip_types->regs.mask = AL_FIC_MASK;
gc->chip_types->regs.ack = AL_FIC_CAUSE;
gc->chip_types->chip.irq_mask = irq_gc_mask_set_bit;
gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
gc->private = fic;
irq_set_chained_handler_and_data(fic->parent_irq,
al_fic_irq_handler,
fic);
return 0;
err_domain_remove:
irq_domain_remove(fic->domain);
return ret;
}
/*
* al_fic_wire_init() - initialize and configure fic in wire mode
* @of_node: optional pointer to interrupt controller's device tree node.
* @base: mmio to fic register
* @name: name of the fic
* @parent_irq: interrupt of parent
*
* This API configures the FIC hardware to work in wire mode.
* In wire mode, the FIC hardware generates a wire ("wired") interrupt.
* The interrupt can be generated on a positive edge or on a level; which of
* the two to use is determined by the hardware connected to this FIC.
*/
static struct al_fic *al_fic_wire_init(struct device_node *node,
void __iomem *base,
const char *name,
unsigned int parent_irq)
{
struct al_fic *fic;
int ret;
u32 control = CONTROL_MASK_MSI_X;
fic = kzalloc(sizeof(*fic), GFP_KERNEL);
if (!fic)
return ERR_PTR(-ENOMEM);
fic->base = base;
fic->parent_irq = parent_irq;
fic->name = name;
/* mask out all interrupts */
writel_relaxed(0xFFFFFFFF, fic->base + AL_FIC_MASK);
/* clear any pending interrupt */
writel_relaxed(0, fic->base + AL_FIC_CAUSE);
writel_relaxed(control, fic->base + AL_FIC_CONTROL);
ret = al_fic_register(node, fic);
if (ret) {
pr_err("fail to register irqchip\n");
goto err_free;
}
pr_debug("%s initialized successfully in Legacy mode (parent-irq=%u)\n",
fic->name, parent_irq);
return fic;
err_free:
kfree(fic);
return ERR_PTR(ret);
}
static int __init al_fic_init_dt(struct device_node *node,
struct device_node *parent)
{
int ret;
void __iomem *base;
unsigned int parent_irq;
struct al_fic *fic;
if (!parent) {
pr_err("%s: unsupported - device require a parent\n",
node->name);
return -EINVAL;
}
base = of_iomap(node, 0);
if (!base) {
pr_err("%s: fail to map memory\n", node->name);
return -ENOMEM;
}
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
pr_err("%s: fail to map irq\n", node->name);
ret = -EINVAL;
goto err_unmap;
}
fic = al_fic_wire_init(node,
base,
node->name,
parent_irq);
if (IS_ERR(fic)) {
pr_err("%s: fail to initialize irqchip (%lu)\n",
node->name,
PTR_ERR(fic));
ret = PTR_ERR(fic);
goto err_irq_dispose;
}
return 0;
err_irq_dispose:
irq_dispose_mapping(parent_irq);
err_unmap:
iounmap(base);
return ret;
}
IRQCHIP_DECLARE(al_fic, "amazon,al-fic", al_fic_init_dt);
| linux-master | drivers/irqchip/irq-al-fic.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2010 Broadcom
* Copyright 2012 Simon Arlott, Chris Boot, Stephen Warren
*
* Quirk 1: Shortcut interrupts don't set the bank 1/2 register pending bits
*
* If an interrupt fires on bank 1 that isn't in the shortcuts list, bit 8
* on bank 0 is set to signify that an interrupt in bank 1 has fired, and
* to look in the bank 1 status register for more information.
*
* If an interrupt fires on bank 1 that _is_ in the shortcuts list, its
* shortcut bit in bank 0 is set as well as its interrupt bit in the bank 1
* status register, but bank 0 bit 8 is _not_ set.
*
* Quirk 2: You can't mask the register 1/2 pending interrupts
*
* In a proper cascaded interrupt controller, the interrupt lines with
* cascaded interrupt controllers on them are just normal interrupt lines.
* You can mask the interrupts and get on with things. With this controller
* you can't do that.
*
* Quirk 3: The shortcut interrupts can't be (un)masked in bank 0
*
* Those interrupts that have shortcuts can only be masked/unmasked in
* their respective banks' enable/disable registers. Doing so in the bank 0
* enable/disable registers has no effect.
*
* The FIQ control register:
* Bits 0-6: IRQ (index in order of interrupts from banks 1, 2, then 0)
* Bit 7: Enable FIQ generation
* Bits 8+: Unused
*
* An interrupt must be disabled before configuring it for FIQ generation
* otherwise both handlers will fire at the same time!
*/
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>
/* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
#define HWIRQ_BANK(i) (i >> 5)
#define HWIRQ_BIT(i) BIT(i & 0x1f)
#define NR_IRQS_BANK0 8
#define BANK0_HWIRQ_MASK 0xff
/* Shortcuts can't be disabled so any unknown new ones need to be masked */
#define SHORTCUT1_MASK 0x00007c00
#define SHORTCUT2_MASK 0x001f8000
#define SHORTCUT_SHIFT 10
#define BANK1_HWIRQ BIT(8)
#define BANK2_HWIRQ BIT(9)
#define BANK0_VALID_MASK (BANK0_HWIRQ_MASK | BANK1_HWIRQ | BANK2_HWIRQ \
| SHORTCUT1_MASK | SHORTCUT2_MASK)
#define REG_FIQ_CONTROL 0x0c
#define FIQ_CONTROL_ENABLE BIT(7)
#define NR_BANKS 3
#define IRQS_PER_BANK 32
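/*
* Worked example of the FIQ index encoding described in the header comment
* (assuming banks 1 and 2 each contribute 32 index slots): the index counts
* interrupts in bank 1, then bank 2, then bank 0, so bank 2 IRQ 25 has index
* 32 + 25 = 57 and would be routed to FIQ by writing
* 57 | FIQ_CONTROL_ENABLE to REG_FIQ_CONTROL, after first disabling the IRQ
* as noted above.
*/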
static const int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
static const int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
static const int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
static const int bank_irqs[] __initconst = { 8, 32, 32 };
static const int shortcuts[] = {
7, 9, 10, 18, 19, /* Bank 1 */
21, 22, 23, 24, 25, 30 /* Bank 2 */
};
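/*
* Worked example of the shortcut translation, derived from the masks and
* table above: bank 0 pending bit 11 is the second shortcut bit, i.e.
* shortcuts[1] = 9 in bank 1, so it translates to
* MAKE_HWIRQ(1, 9) = (1 << 5) | 9 = 41.
*/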
struct armctrl_ic {
void __iomem *base;
void __iomem *pending[NR_BANKS];
void __iomem *enable[NR_BANKS];
void __iomem *disable[NR_BANKS];
struct irq_domain *domain;
};
static struct armctrl_ic intc __read_mostly;
static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
static void bcm2836_chained_handle_irq(struct irq_desc *desc);
static void armctrl_mask_irq(struct irq_data *d)
{
writel_relaxed(HWIRQ_BIT(d->hwirq), intc.disable[HWIRQ_BANK(d->hwirq)]);
}
static void armctrl_unmask_irq(struct irq_data *d)
{
writel_relaxed(HWIRQ_BIT(d->hwirq), intc.enable[HWIRQ_BANK(d->hwirq)]);
}
static struct irq_chip armctrl_chip = {
.name = "ARMCTRL-level",
.irq_mask = armctrl_mask_irq,
.irq_unmask = armctrl_unmask_irq
};
static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
if (WARN_ON(intsize != 2))
return -EINVAL;
if (WARN_ON(intspec[0] >= NR_BANKS))
return -EINVAL;
if (WARN_ON(intspec[1] >= IRQS_PER_BANK))
return -EINVAL;
if (WARN_ON(intspec[0] == 0 && intspec[1] >= NR_IRQS_BANK0))
return -EINVAL;
*out_hwirq = MAKE_HWIRQ(intspec[0], intspec[1]);
*out_type = IRQ_TYPE_NONE;
return 0;
}
static const struct irq_domain_ops armctrl_ops = {
.xlate = armctrl_xlate
};
static int __init armctrl_of_init(struct device_node *node,
struct device_node *parent,
bool is_2836)
{
void __iomem *base;
int irq, b, i;
u32 reg;
base = of_iomap(node, 0);
if (!base)
panic("%pOF: unable to map IC registers\n", node);
intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
&armctrl_ops, NULL);
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
for (b = 0; b < NR_BANKS; b++) {
intc.pending[b] = base + reg_pending[b];
intc.enable[b] = base + reg_enable[b];
intc.disable[b] = base + reg_disable[b];
for (i = 0; i < bank_irqs[b]; i++) {
irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i));
BUG_ON(irq <= 0);
irq_set_chip_and_handler(irq, &armctrl_chip,
handle_level_irq);
irq_set_probe(irq);
}
reg = readl_relaxed(intc.enable[b]);
if (reg) {
writel_relaxed(reg, intc.disable[b]);
pr_err(FW_BUG "Bootloader left irq enabled: "
"bank %d irq %*pbl\n", b, IRQS_PER_BANK, ®);
}
}
reg = readl_relaxed(base + REG_FIQ_CONTROL);
if (reg & FIQ_CONTROL_ENABLE) {
writel_relaxed(0, base + REG_FIQ_CONTROL);
pr_err(FW_BUG "Bootloader left fiq enabled\n");
}
if (is_2836) {
int parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
panic("%pOF: unable to get parent interrupt.\n",
node);
}
irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
} else {
set_handle_irq(bcm2835_handle_irq);
}
return 0;
}
static int __init bcm2835_armctrl_of_init(struct device_node *node,
struct device_node *parent)
{
return armctrl_of_init(node, parent, false);
}
static int __init bcm2836_armctrl_of_init(struct device_node *node,
struct device_node *parent)
{
return armctrl_of_init(node, parent, true);
}
/*
* Handle each interrupt across the entire interrupt controller. This reads the
* status register before handling each interrupt, which is necessary given that
* handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
*/
static u32 armctrl_translate_bank(int bank)
{
u32 stat = readl_relaxed(intc.pending[bank]);
return MAKE_HWIRQ(bank, ffs(stat) - 1);
}
static u32 armctrl_translate_shortcut(int bank, u32 stat)
{
return MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
}
static u32 get_next_armctrl_hwirq(void)
{
u32 stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK;
if (stat == 0)
return ~0;
else if (stat & BANK0_HWIRQ_MASK)
return MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
else if (stat & SHORTCUT1_MASK)
return armctrl_translate_shortcut(1, stat & SHORTCUT1_MASK);
else if (stat & SHORTCUT2_MASK)
return armctrl_translate_shortcut(2, stat & SHORTCUT2_MASK);
else if (stat & BANK1_HWIRQ)
return armctrl_translate_bank(1);
else if (stat & BANK2_HWIRQ)
return armctrl_translate_bank(2);
else
BUG();
}
static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs)
{
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
generic_handle_domain_irq(intc.domain, hwirq);
}
static void bcm2836_chained_handle_irq(struct irq_desc *desc)
{
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
generic_handle_domain_irq(intc.domain, hwirq);
}
IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
bcm2835_armctrl_of_init);
IRQCHIP_DECLARE(bcm2836_armctrl_ic, "brcm,bcm2836-armctrl-ic",
bcm2836_armctrl_of_init);
| linux-master | drivers/irqchip/irq-bcm2835.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/A1 IRQC Driver
*
* Copyright (C) 2019 Glider bvba
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define IRQC_NUM_IRQ 8
#define ICR0 0 /* Interrupt Control Register 0 */
#define ICR0_NMIL BIT(15) /* NMI Input Level (0=low, 1=high) */
#define ICR0_NMIE BIT(8) /* Edge Select (0=falling, 1=rising) */
#define ICR0_NMIF BIT(1) /* NMI Interrupt Request */
#define ICR1 2 /* Interrupt Control Register 1 */
#define ICR1_IRQS(n, sense) ((sense) << ((n) * 2)) /* IRQ Sense Select */
#define ICR1_IRQS_LEVEL_LOW 0
#define ICR1_IRQS_EDGE_FALLING 1
#define ICR1_IRQS_EDGE_RISING 2
#define ICR1_IRQS_EDGE_BOTH 3
#define ICR1_IRQS_MASK(n) ICR1_IRQS((n), 3)
#define IRQRR 4 /* IRQ Interrupt Request Register */
struct rza1_irqc_priv {
struct device *dev;
void __iomem *base;
struct irq_chip chip;
struct irq_domain *irq_domain;
struct of_phandle_args map[IRQC_NUM_IRQ];
};
static struct rza1_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
return data->domain->host_data;
}
static void rza1_irqc_eoi(struct irq_data *d)
{
struct rza1_irqc_priv *priv = irq_data_to_priv(d);
u16 bit = BIT(irqd_to_hwirq(d));
u16 tmp;
tmp = readw_relaxed(priv->base + IRQRR);
if (tmp & bit)
writew_relaxed(GENMASK(IRQC_NUM_IRQ - 1, 0) & ~bit,
priv->base + IRQRR);
irq_chip_eoi_parent(d);
}
static int rza1_irqc_set_type(struct irq_data *d, unsigned int type)
{
struct rza1_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hw_irq = irqd_to_hwirq(d);
u16 sense, tmp;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
sense = ICR1_IRQS_LEVEL_LOW;
break;
case IRQ_TYPE_EDGE_FALLING:
sense = ICR1_IRQS_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_RISING:
sense = ICR1_IRQS_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = ICR1_IRQS_EDGE_BOTH;
break;
default:
return -EINVAL;
}
tmp = readw_relaxed(priv->base + ICR1);
tmp &= ~ICR1_IRQS_MASK(hw_irq);
tmp |= ICR1_IRQS(hw_irq, sense);
writew_relaxed(tmp, priv->base + ICR1);
return 0;
}
static int rza1_irqc_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct rza1_irqc_priv *priv = domain->host_data;
struct irq_fwspec *fwspec = arg;
unsigned int hwirq = fwspec->param[0];
struct irq_fwspec spec;
unsigned int i;
int ret;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &priv->chip,
priv);
if (ret)
return ret;
spec.fwnode = &priv->dev->of_node->fwnode;
spec.param_count = priv->map[hwirq].args_count;
for (i = 0; i < spec.param_count; i++)
spec.param[i] = priv->map[hwirq].args[i];
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &spec);
}
static int rza1_irqc_translate(struct irq_domain *domain,
struct irq_fwspec *fwspec, unsigned long *hwirq,
unsigned int *type)
{
if (fwspec->param_count != 2 || fwspec->param[0] >= IRQC_NUM_IRQ)
return -EINVAL;
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
return 0;
}
static const struct irq_domain_ops rza1_irqc_domain_ops = {
.alloc = rza1_irqc_alloc,
.translate = rza1_irqc_translate,
};
static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv,
struct device_node *gic_node)
{
unsigned int imaplen, i, j, ret;
struct device *dev = priv->dev;
struct device_node *ipar;
const __be32 *imap;
u32 intsize;
imap = of_get_property(dev->of_node, "interrupt-map", &imaplen);
if (!imap)
return -EINVAL;
for (i = 0; i < IRQC_NUM_IRQ; i++) {
if (imaplen < 3)
return -EINVAL;
/* Check interrupt number, ignore sense */
if (be32_to_cpup(imap) != i)
return -EINVAL;
ipar = of_find_node_by_phandle(be32_to_cpup(imap + 2));
if (ipar != gic_node) {
of_node_put(ipar);
return -EINVAL;
}
imap += 3;
imaplen -= 3;
ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
of_node_put(ipar);
if (ret)
return ret;
if (imaplen < intsize)
return -EINVAL;
priv->map[i].args_count = intsize;
for (j = 0; j < intsize; j++)
priv->map[i].args[j] = be32_to_cpup(imap++);
imaplen -= intsize;
}
return 0;
}
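/*
* Illustrative only (GIC SPI numbers invented): the parser above expects an
* interrupt-map with one entry per IRQ0..IRQ7, each mapping a two-cell child
* specifier to a three-cell GIC specifier, e.g. its first two entries could
* look like
*
*   interrupt-map = <0 0 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
*                   <1 0 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
*/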
static int rza1_irqc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct irq_domain *parent = NULL;
struct device_node *gic_node;
struct rza1_irqc_priv *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
priv->dev = dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
gic_node = of_irq_find_parent(np);
if (gic_node)
parent = irq_find_host(gic_node);
if (!parent) {
dev_err(dev, "cannot find parent domain\n");
ret = -ENODEV;
goto out_put_node;
}
ret = rza1_irqc_parse_map(priv, gic_node);
if (ret) {
dev_err(dev, "cannot parse %s: %d\n", "interrupt-map", ret);
goto out_put_node;
}
priv->chip.name = "rza1-irqc";
priv->chip.irq_mask = irq_chip_mask_parent;
priv->chip.irq_unmask = irq_chip_unmask_parent;
priv->chip.irq_eoi = rza1_irqc_eoi;
priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
priv->chip.irq_set_type = rza1_irqc_set_type;
priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
np, &rza1_irqc_domain_ops,
priv);
if (!priv->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
ret = -ENOMEM;
}
out_put_node:
of_node_put(gic_node);
return ret;
}
static int rza1_irqc_remove(struct platform_device *pdev)
{
struct rza1_irqc_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->irq_domain);
return 0;
}
static const struct of_device_id rza1_irqc_dt_ids[] = {
{ .compatible = "renesas,rza1-irqc" },
{},
};
MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids);
static struct platform_driver rza1_irqc_device_driver = {
.probe = rza1_irqc_probe,
.remove = rza1_irqc_remove,
.driver = {
.name = "renesas_rza1_irqc",
.of_match_table = rza1_irqc_dt_ids,
}
};
static int __init rza1_irqc_init(void)
{
return platform_driver_register(&rza1_irqc_device_driver);
}
postcore_initcall(rza1_irqc_init);
static void __exit rza1_irqc_exit(void)
{
platform_driver_unregister(&rza1_irqc_device_driver);
}
module_exit(rza1_irqc_exit);
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_DESCRIPTION("Renesas RZ/A1 IRQC Driver");
| linux-master | drivers/irqchip/irq-renesas-rza1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atheros AR71xx/AR724x/AR913x MISC interrupt controller
*
* Copyright (C) 2015 Alban Bedel <[email protected]>
* Copyright (C) 2010-2011 Jaiganesh Narayanan <[email protected]>
* Copyright (C) 2008-2011 Gabor Juhos <[email protected]>
* Copyright (C) 2008 Imre Kaloz <[email protected]>
*
* Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*/
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define AR71XX_RESET_REG_MISC_INT_STATUS 0
#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
#define ATH79_MISC_IRQ_COUNT 32
#define ATH79_MISC_PERF_IRQ 5
static int ath79_perfcount_irq;
int get_c0_perfcount_int(void)
{
return ath79_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
static void ath79_misc_irq_handler(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *base = domain->host_data;
u32 pending;
chained_irq_enter(chip, desc);
pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
if (!pending) {
spurious_interrupt();
chained_irq_exit(chip, desc);
return;
}
while (pending) {
int bit = __ffs(pending);
generic_handle_domain_irq(domain, bit);
pending &= ~BIT(bit);
}
chained_irq_exit(chip, desc);
}
static void ar71xx_misc_irq_unmask(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->hwirq;
u32 t;
t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
__raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
/* flush write */
__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
}
static void ar71xx_misc_irq_mask(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->hwirq;
u32 t;
t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
/* flush write */
__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
}
static void ar724x_misc_irq_ack(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->hwirq;
u32 t;
t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
/* flush write */
__raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
}
static struct irq_chip ath79_misc_irq_chip = {
.name = "MISC",
.irq_unmask = ar71xx_misc_irq_unmask,
.irq_mask = ar71xx_misc_irq_mask,
};
static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
irq_set_chip_data(irq, d->host_data);
return 0;
}
static const struct irq_domain_ops misc_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
.map = misc_map,
};
static void __init ath79_misc_intc_domain_init(
struct irq_domain *domain, int irq)
{
void __iomem *base = domain->host_data;
ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
/* Disable and clear all interrupts */
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
}
static int __init ath79_misc_intc_of_init(
struct device_node *node, struct device_node *parent)
{
struct irq_domain *domain;
void __iomem *base;
int irq;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("Failed to get MISC IRQ\n");
return -EINVAL;
}
base = of_iomap(node, 0);
if (!base) {
pr_err("Failed to get MISC IRQ registers\n");
return -ENOMEM;
}
domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
&misc_irq_domain_ops, base);
if (!domain) {
pr_err("Failed to add MISC irqdomain\n");
return -EINVAL;
}
ath79_misc_intc_domain_init(domain, irq);
return 0;
}
static int __init ar7100_misc_intc_of_init(
struct device_node *node, struct device_node *parent)
{
ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
return ath79_misc_intc_of_init(node, parent);
}
IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
ar7100_misc_intc_of_init);
static int __init ar7240_misc_intc_of_init(
struct device_node *node, struct device_node *parent)
{
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
return ath79_misc_intc_of_init(node, parent);
}
IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
ar7240_misc_intc_of_init);
void __init ath79_misc_irq_init(void __iomem *regs, int irq,
int irq_base, bool is_ar71xx)
{
struct irq_domain *domain;
if (is_ar71xx)
ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
else
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
irq_base, 0, &misc_irq_domain_ops, regs);
if (!domain)
panic("Failed to create MISC irqdomain");
ath79_misc_intc_domain_init(domain, irq);
}
| linux-master | drivers/irqchip/irq-ath79-misc.c |
// SPDX-License-Identifier: GPL-2.0-only
/* linux/arch/arm/mach-exynos4/mct.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Exynos4 MCT(Multi-Core Timer) support
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#define EXYNOS4_MCTREG(x) (x)
#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * (x)))
#define EXYNOS4_MCT_L_MASK (0xffffff00)
#define MCT_L_TCNTB_OFFSET (0x00)
#define MCT_L_ICNTB_OFFSET (0x08)
#define MCT_L_TCON_OFFSET (0x20)
#define MCT_L_INT_CSTAT_OFFSET (0x30)
#define MCT_L_INT_ENB_OFFSET (0x34)
#define MCT_L_WSTAT_OFFSET (0x40)
#define MCT_G_TCON_START (1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC (1 << 1)
#define MCT_G_TCON_COMP0_ENABLE (1 << 0)
#define MCT_L_TCON_INTERVAL_MODE (1 << 2)
#define MCT_L_TCON_INT_START (1 << 1)
#define MCT_L_TCON_TIMER_START (1 << 0)
#define TICK_BASE_CNT 1
#ifdef CONFIG_ARM
/* Use values higher than ARM arch timer. See 6282edb72bed. */
#define MCT_CLKSOURCE_RATING 450
#define MCT_CLKEVENTS_RATING 500
#else
#define MCT_CLKSOURCE_RATING 350
#define MCT_CLKEVENTS_RATING 350
#endif
/* There are four Global timers starting with 0 offset */
#define MCT_G0_IRQ 0
/* Local timers count starts after global timer count */
#define MCT_L0_IRQ 4
/* Max number of IRQ as per DT binding document */
#define MCT_NR_IRQS 20
/* Max number of local timers */
#define MCT_NR_LOCAL (MCT_NR_IRQS - MCT_L0_IRQ)
enum {
MCT_INT_SPI,
MCT_INT_PPI
};
static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];
struct mct_clock_event_device {
struct clock_event_device evt;
unsigned long base;
/**
* The length of the name must be adjusted if the number of
* local timer interrupts grows over two digits
*/
char name[11];
};
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
unsigned long stat_addr;
u32 mask;
u32 i;
writel_relaxed(value, reg_base + offset);
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
switch (offset & ~EXYNOS4_MCT_L_MASK) {
case MCT_L_TCON_OFFSET:
mask = 1 << 3; /* L_TCON write status */
break;
case MCT_L_ICNTB_OFFSET:
mask = 1 << 1; /* L_ICNTB write status */
break;
case MCT_L_TCNTB_OFFSET:
mask = 1 << 0; /* L_TCNTB write status */
break;
default:
return;
}
} else {
switch (offset) {
case EXYNOS4_MCT_G_TCON:
stat_addr = EXYNOS4_MCT_G_WSTAT;
mask = 1 << 16; /* G_TCON write status */
break;
case EXYNOS4_MCT_G_COMP0_L:
stat_addr = EXYNOS4_MCT_G_WSTAT;
mask = 1 << 0; /* G_COMP0_L write status */
break;
case EXYNOS4_MCT_G_COMP0_U:
stat_addr = EXYNOS4_MCT_G_WSTAT;
mask = 1 << 1; /* G_COMP0_U write status */
break;
case EXYNOS4_MCT_G_COMP0_ADD_INCR:
stat_addr = EXYNOS4_MCT_G_WSTAT;
mask = 1 << 2; /* G_COMP0_ADD_INCR w status */
break;
case EXYNOS4_MCT_G_CNT_L:
stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
mask = 1 << 0; /* G_CNT_L write status */
break;
case EXYNOS4_MCT_G_CNT_U:
stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
mask = 1 << 1; /* G_CNT_U write status */
break;
default:
return;
}
}
/* Wait maximum 1 ms until written values are applied */
for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
if (readl_relaxed(reg_base + stat_addr) & mask) {
writel_relaxed(mask, reg_base + stat_addr);
return;
}
panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}
/* Clocksource handling */
static void exynos4_mct_frc_start(void)
{
u32 reg;
reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
reg |= MCT_G_TCON_START;
exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
/**
* exynos4_read_count_64 - Read all 64-bits of the global counter
*
* This will read all 64-bits of the global counter taking care to make sure
* that the upper and lower half match. Note that reading the MCT can be quite
* slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
* only) version when possible.
*
* Returns the number of cycles in the global counter.
*/
static u64 exynos4_read_count_64(void)
{
unsigned int lo, hi;
u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
do {
hi = hi2;
lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
} while (hi != hi2);
return ((u64)hi << 32) | lo;
}
/**
* exynos4_read_count_32 - Read the lower 32-bits of the global counter
*
* This will read just the lower 32-bits of the global counter. This is marked
* as notrace so it can be used by the scheduler clock.
*
* Returns the number of cycles in the global counter (lower 32 bits).
*/
static u32 notrace exynos4_read_count_32(void)
{
return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}
static u64 exynos4_frc_read(struct clocksource *cs)
{
return exynos4_read_count_32();
}
static void exynos4_frc_resume(struct clocksource *cs)
{
exynos4_mct_frc_start();
}
static struct clocksource mct_frc = {
.name = "mct-frc",
.rating = MCT_CLKSOURCE_RATING,
.read = exynos4_frc_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = exynos4_frc_resume,
};
static u64 notrace exynos4_read_sched_clock(void)
{
return exynos4_read_count_32();
}
#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;
static cycles_t exynos4_read_current_timer(void)
{
BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
"cycles_t needs to move to 32-bit for ARM64 usage");
return exynos4_read_count_32();
}
#endif
static int __init exynos4_clocksource_init(bool frc_shared)
{
/*
* When the frc is shared, the main processor should have already
* turned it on and we shouldn't be writing to TCON.
*/
if (frc_shared)
mct_frc.resume = NULL;
else
exynos4_mct_frc_start();
#if defined(CONFIG_ARM)
exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
exynos4_delay_timer.freq = clk_rate;
register_current_timer_delay(&exynos4_delay_timer);
#endif
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
return 0;
}
static void exynos4_mct_comp0_stop(void)
{
unsigned int tcon;
tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}
static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
{
unsigned int tcon;
u64 comp_cycle;
tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
if (periodic) {
tcon |= MCT_G_TCON_COMP0_AUTO_INC;
exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
}
comp_cycle = exynos4_read_count_64() + cycles;
exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);
tcon |= MCT_G_TCON_COMP0_ENABLE;
exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON);
}
static int exynos4_comp_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
exynos4_mct_comp0_start(false, cycles);
return 0;
}
static int mct_set_state_shutdown(struct clock_event_device *evt)
{
exynos4_mct_comp0_stop();
return 0;
}
static int mct_set_state_periodic(struct clock_event_device *evt)
{
unsigned long cycles_per_jiffy;
cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
>> evt->shift);
exynos4_mct_comp0_stop();
exynos4_mct_comp0_start(true, cycles_per_jiffy);
return 0;
}
static struct clock_event_device mct_comp_device = {
.name = "mct-comp",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.rating = 250,
.set_next_event = exynos4_comp_set_next_event,
.set_state_periodic = mct_set_state_periodic,
.set_state_shutdown = mct_set_state_shutdown,
.set_state_oneshot = mct_set_state_shutdown,
.set_state_oneshot_stopped = mct_set_state_shutdown,
.tick_resume = mct_set_state_shutdown,
};
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int exynos4_clockevent_init(void)
{
mct_comp_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&mct_comp_device, clk_rate,
0xf, 0xffffffff);
if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq",
&mct_comp_device))
pr_err("%s: request_irq() failed\n", "mct_comp_irq");
return 0;
}
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
unsigned long tmp;
unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
tmp = readl_relaxed(reg_base + offset);
if (tmp & mask) {
tmp &= ~mask;
exynos4_mct_write(tmp, offset);
}
}
static void exynos4_mct_tick_start(unsigned long cycles,
struct mct_clock_event_device *mevt)
{
unsigned long tmp;
exynos4_mct_tick_stop(mevt);
tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */
/* update interrupt count buffer */
exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);
/* enable MCT tick interrupt */
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
MCT_L_TCON_INTERVAL_MODE;
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
/* Clear the MCT tick interrupt */
if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}
static int exynos4_tick_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
struct mct_clock_event_device *mevt;
mevt = container_of(evt, struct mct_clock_event_device, evt);
exynos4_mct_tick_start(cycles, mevt);
return 0;
}
static int set_state_shutdown(struct clock_event_device *evt)
{
struct mct_clock_event_device *mevt;
mevt = container_of(evt, struct mct_clock_event_device, evt);
exynos4_mct_tick_stop(mevt);
exynos4_mct_tick_clear(mevt);
return 0;
}
static int set_state_periodic(struct clock_event_device *evt)
{
struct mct_clock_event_device *mevt;
unsigned long cycles_per_jiffy;
mevt = container_of(evt, struct mct_clock_event_device, evt);
cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
>> evt->shift);
exynos4_mct_tick_stop(mevt);
exynos4_mct_tick_start(cycles_per_jiffy, mevt);
return 0;
}
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
struct mct_clock_event_device *mevt = dev_id;
struct clock_event_device *evt = &mevt->evt;
/*
* This is for supporting oneshot mode.
* The MCT keeps generating interrupts periodically
* unless it is explicitly stopped.
*/
if (!clockevent_state_periodic(&mevt->evt))
exynos4_mct_tick_stop(mevt);
exynos4_mct_tick_clear(mevt);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int exynos4_mct_starting_cpu(unsigned int cpu)
{
struct mct_clock_event_device *mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
evt->name = mevt->name;
evt->cpumask = cpumask_of(cpu);
evt->set_next_event = exynos4_tick_set_next_event;
evt->set_state_periodic = set_state_periodic;
evt->set_state_shutdown = set_state_shutdown;
evt->set_state_oneshot = set_state_shutdown;
evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERCPU;
evt->rating = MCT_CLKEVENTS_RATING;
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq == -1)
return -EIO;
irq_force_affinity(evt->irq, cpumask_of(cpu));
enable_irq(evt->irq);
} else {
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
}
clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
0xf, 0x7fffffff);
return 0;
}
static int exynos4_mct_dying_cpu(unsigned int cpu)
{
struct mct_clock_event_device *mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
evt->set_state_shutdown(evt);
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
return 0;
}
static int __init exynos4_timer_resources(struct device_node *np)
{
struct clk *mct_clk, *tick_clk;
reg_base = of_iomap(np, 0);
if (!reg_base)
panic("%s: unable to ioremap mct address space\n", __func__);
tick_clk = of_clk_get_by_name(np, "fin_pll");
if (IS_ERR(tick_clk))
panic("%s: unable to determine tick clock rate\n", __func__);
clk_rate = clk_get_rate(tick_clk);
mct_clk = of_clk_get_by_name(np, "mct");
if (IS_ERR(mct_clk))
panic("%s: unable to retrieve mct clock instance\n", __func__);
clk_prepare_enable(mct_clk);
return 0;
}
/**
* exynos4_timer_interrupts - initialize MCT interrupts
* @np: device node for MCT
* @int_type: interrupt type, MCT_INT_PPI or MCT_INT_SPI
* @local_idx: array mapping CPU numbers to local timer indices
* @nr_local: size of @local_idx array
*/
static int __init exynos4_timer_interrupts(struct device_node *np,
unsigned int int_type,
const u32 *local_idx,
size_t nr_local)
{
int nr_irqs, i, err, cpu;
mct_int_type = int_type;
/* This driver uses only one global timer interrupt */
mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
/*
* Find out the number of local irqs specified. The local
* timer irqs are specified after the four global timer
* irqs.
*/
nr_irqs = of_irq_count(np);
if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
nr_irqs);
nr_irqs = ARRAY_SIZE(mct_irqs);
}
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
if (mct_int_type == MCT_INT_PPI) {
err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
exynos4_mct_tick_isr, "MCT",
&percpu_mct_tick);
WARN(err, "MCT: can't request IRQ %d (%d)\n",
mct_irqs[MCT_L0_IRQ], err);
} else {
for_each_possible_cpu(cpu) {
int mct_irq;
unsigned int irq_idx;
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
if (cpu >= nr_local) {
err = -EINVAL;
goto out_irq;
}
irq_idx = MCT_L0_IRQ + local_idx[cpu];
pcpu_mevt->evt.irq = -1;
if (irq_idx >= ARRAY_SIZE(mct_irqs))
break;
mct_irq = mct_irqs[irq_idx];
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
if (request_irq(mct_irq,
exynos4_mct_tick_isr,
IRQF_TIMER | IRQF_NOBALANCING,
pcpu_mevt->name, pcpu_mevt)) {
pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
cpu);
continue;
}
pcpu_mevt->evt.irq = mct_irq;
}
}
for_each_possible_cpu(cpu) {
struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu);
if (cpu >= nr_local) {
err = -EINVAL;
goto out_irq;
}
mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]);
}
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
"clockevents/exynos4/mct_timer:starting",
exynos4_mct_starting_cpu,
exynos4_mct_dying_cpu);
if (err)
goto out_irq;
return 0;
out_irq:
if (mct_int_type == MCT_INT_PPI) {
free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
} else {
for_each_possible_cpu(cpu) {
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
if (pcpu_mevt->evt.irq != -1) {
free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
pcpu_mevt->evt.irq = -1;
}
}
}
return err;
}
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
bool frc_shared = of_property_read_bool(np, "samsung,frc-shared");
u32 local_idx[MCT_NR_LOCAL] = {0};
int nr_local;
int ret;
nr_local = of_property_count_u32_elems(np, "samsung,local-timers");
if (nr_local == 0)
return -EINVAL;
if (nr_local > 0) {
if (nr_local > ARRAY_SIZE(local_idx))
return -EINVAL;
ret = of_property_read_u32_array(np, "samsung,local-timers",
local_idx, nr_local);
if (ret)
return ret;
} else {
int i;
nr_local = ARRAY_SIZE(local_idx);
for (i = 0; i < nr_local; i++)
local_idx[i] = i;
}
ret = exynos4_timer_resources(np);
if (ret)
return ret;
ret = exynos4_timer_interrupts(np, int_type, local_idx, nr_local);
if (ret)
return ret;
ret = exynos4_clocksource_init(frc_shared);
if (ret)
return ret;
/*
* When the FRC is shared with a main processor, this secondary
* processor cannot use the global comparator.
*/
if (frc_shared)
return 0;
return exynos4_clockevent_init();
}
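/*
* Illustrative DT fragment only (values invented): a board with two CPUs
* that wires CPU0 to local timer 0 and CPU1 to local timer 2 would describe
* that as
*
*   samsung,local-timers = <0 2>;
*
* When the property is absent, mct_init_dt() above falls back to the
* identity mapping local_idx[i] = i.
*/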
static int __init mct_init_spi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_SPI);
}
static int __init mct_init_ppi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_PPI);
}
TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);
| linux-master | drivers/clocksource/exynos_mct.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/goldfish.h>
#include <clocksource/timer-goldfish.h>
struct goldfish_timer {
struct clocksource cs;
struct clock_event_device ced;
struct resource res;
void __iomem *base;
};
static struct goldfish_timer *ced_to_gf(struct clock_event_device *ced)
{
return container_of(ced, struct goldfish_timer, ced);
}
static struct goldfish_timer *cs_to_gf(struct clocksource *cs)
{
return container_of(cs, struct goldfish_timer, cs);
}
static u64 goldfish_timer_read(struct clocksource *cs)
{
struct goldfish_timer *timerdrv = cs_to_gf(cs);
void __iomem *base = timerdrv->base;
u32 time_low, time_high;
u64 ticks;
/*
* time_low: get low bits of current time and update time_high
* time_high: get high bits of time at last time_low read
*/
time_low = gf_ioread32(base + TIMER_TIME_LOW);
time_high = gf_ioread32(base + TIMER_TIME_HIGH);
ticks = ((u64)time_high << 32) | time_low;
return ticks;
}
static int goldfish_timer_set_oneshot(struct clock_event_device *evt)
{
struct goldfish_timer *timerdrv = ced_to_gf(evt);
void __iomem *base = timerdrv->base;
gf_iowrite32(0, base + TIMER_ALARM_HIGH);
gf_iowrite32(0, base + TIMER_ALARM_LOW);
gf_iowrite32(1, base + TIMER_IRQ_ENABLED);
return 0;
}
static int goldfish_timer_shutdown(struct clock_event_device *evt)
{
struct goldfish_timer *timerdrv = ced_to_gf(evt);
void __iomem *base = timerdrv->base;
gf_iowrite32(0, base + TIMER_IRQ_ENABLED);
return 0;
}
static int goldfish_timer_next_event(unsigned long delta,
struct clock_event_device *evt)
{
struct goldfish_timer *timerdrv = ced_to_gf(evt);
void __iomem *base = timerdrv->base;
u64 now;
now = goldfish_timer_read(&timerdrv->cs);
now += delta;
gf_iowrite32(upper_32_bits(now), base + TIMER_ALARM_HIGH);
gf_iowrite32(lower_32_bits(now), base + TIMER_ALARM_LOW);
return 0;
}
static irqreturn_t goldfish_timer_irq(int irq, void *dev_id)
{
struct goldfish_timer *timerdrv = dev_id;
struct clock_event_device *evt = &timerdrv->ced;
void __iomem *base = timerdrv->base;
gf_iowrite32(1, base + TIMER_CLEAR_INTERRUPT);
evt->event_handler(evt);
return IRQ_HANDLED;
}
int __init goldfish_timer_init(int irq, void __iomem *base)
{
struct goldfish_timer *timerdrv;
int ret;
timerdrv = kzalloc(sizeof(*timerdrv), GFP_KERNEL);
if (!timerdrv)
return -ENOMEM;
timerdrv->base = base;
timerdrv->ced = (struct clock_event_device){
.name = "goldfish_timer",
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = goldfish_timer_shutdown,
.set_state_oneshot = goldfish_timer_set_oneshot,
.set_next_event = goldfish_timer_next_event,
};
timerdrv->res = (struct resource){
.name = "goldfish_timer",
.start = (unsigned long)base,
.end = (unsigned long)base + 0xfff,
};
ret = request_resource(&iomem_resource, &timerdrv->res);
if (ret) {
pr_err("Cannot allocate '%s' resource\n", timerdrv->res.name);
return ret;
}
timerdrv->cs = (struct clocksource){
.name = "goldfish_timer",
.rating = 400,
.read = goldfish_timer_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = 0,
.max_idle_ns = LONG_MAX,
};
clocksource_register_hz(&timerdrv->cs, NSEC_PER_SEC);
ret = request_irq(irq, goldfish_timer_irq, IRQF_TIMER,
"goldfish_timer", timerdrv);
if (ret) {
pr_err("Couldn't register goldfish-timer interrupt\n");
return ret;
}
clockevents_config_and_register(&timerdrv->ced, NSEC_PER_SEC,
1, 0xffffffff);
return 0;
}
|
linux-master
|
drivers/clocksource/timer-goldfish.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Conexant Digicolor timer driver
*
* Author: Baruch Siach <[email protected]>
*
* Copyright (C) 2014 Paradox Innovation Ltd.
*
* Based on:
* Allwinner SoCs hstimer driver
*
* Copyright (C) 2013 Maxime Ripard
*
* Maxime Ripard <[email protected]>
*/
/*
* Conexant Digicolor SoCs have 8 configurable timers, named from "Timer A" to
* "Timer H". Timer A is the only one with watchdog support, so it is dedicated
* to the watchdog driver. This driver uses Timer B for sched_clock(), and
* Timer C for clockevents.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
enum {
TIMER_A,
TIMER_B,
TIMER_C,
TIMER_D,
TIMER_E,
TIMER_F,
TIMER_G,
TIMER_H,
};
#define CONTROL(t) ((t)*8)
#define COUNT(t) ((t)*8 + 4)
#define CONTROL_DISABLE 0
#define CONTROL_ENABLE BIT(0)
#define CONTROL_MODE(m) ((m) << 4)
#define CONTROL_MODE_ONESHOT CONTROL_MODE(1)
#define CONTROL_MODE_PERIODIC CONTROL_MODE(2)
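/*
 * Illustrative example: each timer occupies an 8-byte register stride, so
 * the macros above resolve to CONTROL(TIMER_B) == 0x08 / COUNT(TIMER_B) ==
 * 0x0c and CONTROL(TIMER_C) == 0x10 / COUNT(TIMER_C) == 0x14. Enabling
 * Timer C in one-shot mode thus writes CONTROL_ENABLE | CONTROL_MODE_ONESHOT
 * (0x11) to offset 0x10.
 */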
struct digicolor_timer {
struct clock_event_device ce;
void __iomem *base;
u32 ticks_per_jiffy;
int timer_id; /* one of TIMER_* */
};
static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
{
return container_of(ce, struct digicolor_timer, ce);
}
static inline void dc_timer_disable(struct clock_event_device *ce)
{
struct digicolor_timer *dt = dc_timer(ce);
writeb(CONTROL_DISABLE, dt->base + CONTROL(dt->timer_id));
}
static inline void dc_timer_enable(struct clock_event_device *ce, u32 mode)
{
struct digicolor_timer *dt = dc_timer(ce);
writeb(CONTROL_ENABLE | mode, dt->base + CONTROL(dt->timer_id));
}
static inline void dc_timer_set_count(struct clock_event_device *ce,
unsigned long count)
{
struct digicolor_timer *dt = dc_timer(ce);
writel(count, dt->base + COUNT(dt->timer_id));
}
static int digicolor_clkevt_shutdown(struct clock_event_device *ce)
{
dc_timer_disable(ce);
return 0;
}
static int digicolor_clkevt_set_oneshot(struct clock_event_device *ce)
{
dc_timer_disable(ce);
dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
return 0;
}
static int digicolor_clkevt_set_periodic(struct clock_event_device *ce)
{
struct digicolor_timer *dt = dc_timer(ce);
dc_timer_disable(ce);
dc_timer_set_count(ce, dt->ticks_per_jiffy);
dc_timer_enable(ce, CONTROL_MODE_PERIODIC);
return 0;
}
static int digicolor_clkevt_next_event(unsigned long evt,
struct clock_event_device *ce)
{
dc_timer_disable(ce);
dc_timer_set_count(ce, evt);
dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
return 0;
}
static struct digicolor_timer dc_timer_dev = {
.ce = {
.name = "digicolor_tick",
.rating = 340,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = digicolor_clkevt_shutdown,
.set_state_periodic = digicolor_clkevt_set_periodic,
.set_state_oneshot = digicolor_clkevt_set_oneshot,
.tick_resume = digicolor_clkevt_shutdown,
.set_next_event = digicolor_clkevt_next_event,
},
.timer_id = TIMER_C,
};
static irqreturn_t digicolor_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static u64 notrace digicolor_timer_sched_read(void)
{
return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
}
static int __init digicolor_timer_init(struct device_node *node)
{
unsigned long rate;
struct clk *clk;
int ret, irq;
/*
* timer registers are shared with the watchdog timer;
* don't map exclusively
*/
dc_timer_dev.base = of_iomap(node, 0);
if (!dc_timer_dev.base) {
pr_err("Can't map registers\n");
return -ENXIO;
}
irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
if (irq <= 0) {
pr_err("Can't parse IRQ\n");
return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock\n");
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
dc_timer_dev.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
writeb(CONTROL_DISABLE, dc_timer_dev.base + CONTROL(TIMER_B));
writel(UINT_MAX, dc_timer_dev.base + COUNT(TIMER_B));
writeb(CONTROL_ENABLE, dc_timer_dev.base + CONTROL(TIMER_B));
sched_clock_register(digicolor_timer_sched_read, 32, rate);
clocksource_mmio_init(dc_timer_dev.base + COUNT(TIMER_B), node->name,
rate, 340, 32, clocksource_mmio_readl_down);
ret = request_irq(irq, digicolor_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
&dc_timer_dev.ce);
if (ret) {
pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
return ret;
}
dc_timer_dev.ce.cpumask = cpu_possible_mask;
dc_timer_dev.ce.irq = irq;
clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
return 0;
}
TIMER_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
digicolor_timer_init);
|
linux-master
|
drivers/clocksource/timer-digicolor.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Ingenic SoCs TCU IRQ driver
* Copyright (C) 2019 Paul Cercueil <[email protected]>
* Copyright (C) 2020 周琰杰 (Zhou Yanjie) <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>
#include <dt-bindings/clock/ingenic,tcu.h>
static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);
struct ingenic_soc_info {
unsigned int num_channels;
};
struct ingenic_tcu_timer {
unsigned int cpu;
unsigned int channel;
struct clock_event_device cevt;
struct clk *clk;
char name[8];
};
struct ingenic_tcu {
struct regmap *map;
struct device_node *np;
struct clk *cs_clk;
unsigned int cs_channel;
struct clocksource cs;
unsigned long pwm_channels_mask;
struct ingenic_tcu_timer timers[];
};
static struct ingenic_tcu *ingenic_tcu;
static u64 notrace ingenic_tcu_timer_read(void)
{
struct ingenic_tcu *tcu = ingenic_tcu;
unsigned int count;
regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);
return count;
}
static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
{
return ingenic_tcu_timer_read();
}
static inline struct ingenic_tcu *
to_ingenic_tcu(struct ingenic_tcu_timer *timer)
{
return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
}
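/*
 * Note on the container_of() above: every per-CPU timer lives at
 * timers[timer->cpu] inside the flexible array at the end of struct
 * ingenic_tcu, so the timer's own ->cpu index is enough to walk back from
 * the embedded struct to the enclosing TCU instance.
 */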
static inline struct ingenic_tcu_timer *
to_ingenic_tcu_timer(struct clock_event_device *evt)
{
return container_of(evt, struct ingenic_tcu_timer, cevt);
}
static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
{
struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
return 0;
}
static int ingenic_tcu_cevt_set_next(unsigned long next,
struct clock_event_device *evt)
{
struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
if (next > 0xffff)
return -EINVAL;
regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));
return 0;
}
static void ingenic_per_cpu_event_handler(void *info)
{
struct clock_event_device *cevt = (struct clock_event_device *) info;
cevt->event_handler(cevt);
}
static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
{
struct ingenic_tcu_timer *timer = dev_id;
struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
call_single_data_t *csd;
regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
if (timer->cevt.event_handler) {
csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
csd->info = (void *) &timer->cevt;
csd->func = ingenic_per_cpu_event_handler;
smp_call_function_single_async(timer->cpu, csd);
}
return IRQ_HANDLED;
}
static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
{
struct of_phandle_args args;
args.np = np;
args.args_count = 1;
args.args[0] = id;
return of_clk_get_from_provider(&args);
}
static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
struct ingenic_tcu *tcu = ingenic_tcu;
struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
unsigned int timer_virq;
struct irq_domain *domain;
unsigned long rate;
int err;
timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
if (IS_ERR(timer->clk))
return PTR_ERR(timer->clk);
err = clk_prepare_enable(timer->clk);
if (err)
goto err_clk_put;
rate = clk_get_rate(timer->clk);
if (!rate) {
err = -EINVAL;
goto err_clk_disable;
}
domain = irq_find_host(tcu->np);
if (!domain) {
err = -ENODEV;
goto err_clk_disable;
}
timer_virq = irq_create_mapping(domain, timer->channel);
if (!timer_virq) {
err = -EINVAL;
goto err_clk_disable;
}
snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);
err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
timer->name, timer);
if (err)
goto err_irq_dispose_mapping;
timer->cpu = smp_processor_id();
timer->cevt.cpumask = cpumask_of(smp_processor_id());
timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->cevt.name = timer->name;
timer->cevt.rating = 200;
timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;
clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);
return 0;
err_irq_dispose_mapping:
irq_dispose_mapping(timer_virq);
err_clk_disable:
clk_disable_unprepare(timer->clk);
err_clk_put:
clk_put(timer->clk);
return err;
}
static int __init ingenic_tcu_clocksource_init(struct device_node *np,
struct ingenic_tcu *tcu)
{
unsigned int channel = tcu->cs_channel;
struct clocksource *cs = &tcu->cs;
unsigned long rate;
int err;
tcu->cs_clk = ingenic_tcu_get_clock(np, channel);
if (IS_ERR(tcu->cs_clk))
return PTR_ERR(tcu->cs_clk);
err = clk_prepare_enable(tcu->cs_clk);
if (err)
goto err_clk_put;
rate = clk_get_rate(tcu->cs_clk);
if (!rate) {
err = -EINVAL;
goto err_clk_disable;
}
/* Reset channel */
regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
0xffff & ~TCU_TCSR_RESERVED_BITS, 0);
/* Reset counter */
regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);
/* Enable channel */
regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));
cs->name = "ingenic-timer";
cs->rating = 200;
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
cs->mask = CLOCKSOURCE_MASK(16);
cs->read = ingenic_tcu_timer_cs_read;
err = clocksource_register_hz(cs, rate);
if (err)
goto err_clk_disable;
return 0;
err_clk_disable:
clk_disable_unprepare(tcu->cs_clk);
err_clk_put:
clk_put(tcu->cs_clk);
return err;
}
static const struct ingenic_soc_info jz4740_soc_info = {
.num_channels = 8,
};
static const struct ingenic_soc_info jz4725b_soc_info = {
.num_channels = 6,
};
static const struct of_device_id ingenic_tcu_of_match[] = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
{ .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
{ /* sentinel */ }
};
static int __init ingenic_tcu_init(struct device_node *np)
{
const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
const struct ingenic_soc_info *soc_info = id->data;
struct ingenic_tcu_timer *timer;
struct ingenic_tcu *tcu;
struct regmap *map;
unsigned int cpu;
int ret, last_bit = -1;
long rate;
of_node_clear_flag(np, OF_POPULATED);
map = device_node_to_regmap(np);
if (IS_ERR(map))
return PTR_ERR(map);
tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
GFP_KERNEL);
if (!tcu)
return -ENOMEM;
/*
* Enable all TCU channels for PWM use by default except channels 0/1,
* and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
*/
tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
num_possible_cpus() + 1);
of_property_read_u32(np, "ingenic,pwm-channels-mask",
(u32 *)&tcu->pwm_channels_mask);
/* Verify that we have at least num_possible_cpus() + 1 free channels */
if (hweight8(tcu->pwm_channels_mask) >
soc_info->num_channels - num_possible_cpus() + 1) {
pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
tcu->pwm_channels_mask);
ret = -EINVAL;
goto err_free_ingenic_tcu;
}
tcu->map = map;
tcu->np = np;
ingenic_tcu = tcu;
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
timer = &tcu->timers[cpu];
timer->cpu = cpu;
timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
soc_info->num_channels,
last_bit + 1);
last_bit = timer->channel;
}
tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
soc_info->num_channels,
last_bit + 1);
ret = ingenic_tcu_clocksource_init(np, tcu);
if (ret) {
pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
goto err_free_ingenic_tcu;
}
/* Setup clock events on each CPU core */
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
ingenic_tcu_setup_cevt, NULL);
if (ret < 0) {
pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
goto err_tcu_clocksource_cleanup;
}
/* Register the sched_clock at the end as there's no way to undo it */
rate = clk_get_rate(tcu->cs_clk);
sched_clock_register(ingenic_tcu_timer_read, 16, rate);
return 0;
err_tcu_clocksource_cleanup:
clocksource_unregister(&tcu->cs);
clk_disable_unprepare(tcu->cs_clk);
clk_put(tcu->cs_clk);
err_free_ingenic_tcu:
kfree(tcu);
return ret;
}
TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4760_tcu_intc, "ingenic,jz4760-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init);
static int __init ingenic_tcu_probe(struct platform_device *pdev)
{
platform_set_drvdata(pdev, ingenic_tcu);
return 0;
}
static int ingenic_tcu_suspend(struct device *dev)
{
struct ingenic_tcu *tcu = dev_get_drvdata(dev);
unsigned int cpu;
clk_disable(tcu->cs_clk);
for (cpu = 0; cpu < num_online_cpus(); cpu++)
clk_disable(tcu->timers[cpu].clk);
return 0;
}
static int ingenic_tcu_resume(struct device *dev)
{
struct ingenic_tcu *tcu = dev_get_drvdata(dev);
unsigned int cpu;
int ret;
for (cpu = 0; cpu < num_online_cpus(); cpu++) {
ret = clk_enable(tcu->timers[cpu].clk);
if (ret)
goto err_timer_clk_disable;
}
ret = clk_enable(tcu->cs_clk);
if (ret)
goto err_timer_clk_disable;
return 0;
err_timer_clk_disable:
for (; cpu > 0; cpu--)
clk_disable(tcu->timers[cpu - 1].clk);
return ret;
}
static const struct dev_pm_ops ingenic_tcu_pm_ops = {
/* _noirq: We want the TCU clocks to be gated last / ungated first */
.suspend_noirq = ingenic_tcu_suspend,
.resume_noirq = ingenic_tcu_resume,
};
static struct platform_driver ingenic_tcu_driver = {
.driver = {
.name = "ingenic-tcu-timer",
.pm = pm_sleep_ptr(&ingenic_tcu_pm_ops),
.of_match_table = ingenic_tcu_of_match,
},
};
builtin_platform_driver_probe(ingenic_tcu_driver, ingenic_tcu_probe);
|
linux-master
|
drivers/clocksource/ingenic-timer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* at91sam926x_time.c - Periodic Interval Timer (PIT) for at91sam926x
*
* Copyright (C) 2005-2006 M. Amine SAYA, ATMEL Rousset, France
* Revision 2005 M. Nicolas Diremdjian, ATMEL Rousset, France
* Converted to ClockSource/ClockEvents by David Brownell.
*/
#define pr_fmt(fmt) "AT91: PIT: " fmt
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#define AT91_PIT_MR 0x00 /* Mode Register */
#define AT91_PIT_PITIEN BIT(25) /* Timer Interrupt Enable */
#define AT91_PIT_PITEN BIT(24) /* Timer Enabled */
#define AT91_PIT_PIV GENMASK(19, 0) /* Periodic Interval Value */
#define AT91_PIT_SR 0x04 /* Status Register */
#define AT91_PIT_PITS BIT(0) /* Timer Status */
#define AT91_PIT_PIVR 0x08 /* Periodic Interval Value Register */
#define AT91_PIT_PIIR 0x0c /* Periodic Interval Image Register */
#define AT91_PIT_PICNT GENMASK(31, 20) /* Interval Counter */
#define AT91_PIT_CPIV GENMASK(19, 0) /* Interval Value */
#define PIT_CPIV(x) ((x) & AT91_PIT_CPIV)
#define PIT_PICNT(x) (((x) & AT91_PIT_PICNT) >> 20)
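/*
 * Illustrative decode (value is made up): a PIT_PIVR/PIT_PIIR read of
 * 0x00301234 yields PIT_PICNT() == 0x003 (full periods elapsed, bits 31:20)
 * and PIT_CPIV() == 0x01234 (current count within the period, bits 19:0).
 */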
struct pit_data {
struct clock_event_device clkevt;
struct clocksource clksrc;
void __iomem *base;
u32 cycle;
u32 cnt;
unsigned int irq;
struct clk *mck;
};
static inline struct pit_data *clksrc_to_pit_data(struct clocksource *clksrc)
{
return container_of(clksrc, struct pit_data, clksrc);
}
static inline struct pit_data *clkevt_to_pit_data(struct clock_event_device *clkevt)
{
return container_of(clkevt, struct pit_data, clkevt);
}
static inline unsigned int pit_read(void __iomem *base, unsigned int reg_offset)
{
return readl_relaxed(base + reg_offset);
}
static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsigned long value)
{
writel_relaxed(value, base + reg_offset);
}
/*
* Clocksource: just a monotonic counter of MCK/16 cycles.
* We don't care whether or not PIT irqs are enabled.
*/
static u64 read_pit_clk(struct clocksource *cs)
{
struct pit_data *data = clksrc_to_pit_data(cs);
unsigned long flags;
u32 elapsed;
u32 t;
raw_local_irq_save(flags);
elapsed = data->cnt;
t = pit_read(data->base, AT91_PIT_PIIR);
raw_local_irq_restore(flags);
elapsed += PIT_PICNT(t) * data->cycle;
elapsed += PIT_CPIV(t);
return elapsed;
}
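/*
 * Note: the value returned above is data->cnt (periods already folded in
 * whenever the periodic clockevent code runs) plus PICNT whole periods and
 * CPIV partial ticks decoded from the single PIIR sample, all expressed in
 * MCK/16 cycles.
 */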
static int pit_clkevt_shutdown(struct clock_event_device *dev)
{
struct pit_data *data = clkevt_to_pit_data(dev);
/* disable irq, leaving the clocksource active */
pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
return 0;
}
/*
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
*/
static int pit_clkevt_set_periodic(struct clock_event_device *dev)
{
struct pit_data *data = clkevt_to_pit_data(dev);
/* update clocksource counter */
data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
pit_write(data->base, AT91_PIT_MR,
(data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN);
return 0;
}
static void at91sam926x_pit_suspend(struct clock_event_device *cedev)
{
struct pit_data *data = clkevt_to_pit_data(cedev);
/* Disable timer */
pit_write(data->base, AT91_PIT_MR, 0);
}
static void at91sam926x_pit_reset(struct pit_data *data)
{
/* Disable timer and irqs */
pit_write(data->base, AT91_PIT_MR, 0);
/* Clear any pending interrupts, wait for PIT to stop counting */
while (PIT_CPIV(pit_read(data->base, AT91_PIT_PIVR)) != 0)
cpu_relax();
/* Start PIT but don't enable IRQ */
pit_write(data->base, AT91_PIT_MR,
(data->cycle - 1) | AT91_PIT_PITEN);
}
static void at91sam926x_pit_resume(struct clock_event_device *cedev)
{
struct pit_data *data = clkevt_to_pit_data(cedev);
at91sam926x_pit_reset(data);
}
/*
* IRQ handler for the timer.
*/
static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
{
struct pit_data *data = dev_id;
/* The PIT interrupt may be disabled, and is shared */
if (clockevent_state_periodic(&data->clkevt) &&
(pit_read(data->base, AT91_PIT_SR) & AT91_PIT_PITS)) {
/* Get number of ticks performed before irq, and ack it */
data->cnt += data->cycle * PIT_PICNT(pit_read(data->base,
AT91_PIT_PIVR));
data->clkevt.event_handler(&data->clkevt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/*
* Set up both clocksource and clockevent support.
*/
static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
unsigned long pit_rate;
unsigned bits;
int ret;
struct pit_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->base = of_iomap(node, 0);
if (!data->base) {
pr_err("Could not map PIT address\n");
ret = -ENXIO;
goto exit;
}
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck)) {
pr_err("Unable to get mck clk\n");
ret = PTR_ERR(data->mck);
goto exit;
}
ret = clk_prepare_enable(data->mck);
if (ret) {
pr_err("Unable to enable mck\n");
goto exit;
}
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq) {
pr_err("Unable to get IRQ from DT\n");
ret = -EINVAL;
goto exit;
}
/*
* Use our actual MCK to figure out how many MCK/16 ticks per
* 1/HZ period (instead of a compile-time constant LATCH).
*/
pit_rate = clk_get_rate(data->mck) / 16;
data->cycle = DIV_ROUND_CLOSEST(pit_rate, HZ);
WARN_ON(((data->cycle - 1) & ~AT91_PIT_PIV) != 0);
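/*
 * Worked example with a hypothetical 133 MHz MCK: pit_rate is about
 * 8312500 Hz, so with HZ == 100 data->cycle is about 83125 ticks, which
 * fits comfortably in the 20-bit AT91_PIT_PIV field checked above.
 */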
/* Initialize and enable the timer */
at91sam926x_pit_reset(data);
/*
* Register clocksource. The high order bits of PIV are unused,
* so this isn't a 32-bit counter unless we get clockevent irqs.
*/
bits = 12 /* PICNT */ + ilog2(data->cycle) /* PIV */;
data->clksrc.mask = CLOCKSOURCE_MASK(bits);
data->clksrc.name = "pit";
data->clksrc.rating = 175;
data->clksrc.read = read_pit_clk;
data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
ret = clocksource_register_hz(&data->clksrc, pit_rate);
if (ret) {
pr_err("Failed to register clocksource\n");
goto exit;
}
/* Set up irq handler */
ret = request_irq(data->irq, at91sam926x_pit_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", data);
if (ret) {
pr_err("Unable to setup IRQ\n");
clocksource_unregister(&data->clksrc);
goto exit;
}
/* Set up and register clockevents */
data->clkevt.name = "pit";
data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
data->clkevt.shift = 32;
data->clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, data->clkevt.shift);
data->clkevt.rating = 100;
data->clkevt.cpumask = cpumask_of(0);
data->clkevt.set_state_shutdown = pit_clkevt_shutdown;
data->clkevt.set_state_periodic = pit_clkevt_set_periodic;
data->clkevt.resume = at91sam926x_pit_resume;
data->clkevt.suspend = at91sam926x_pit_suspend;
clockevents_register_device(&data->clkevt);
return 0;
exit:
kfree(data);
return ret;
}
TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);
|
linux-master
|
drivers/clocksource/timer-atmel-pit.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* linux/arch/arm/plat-omap/dmtimer.c
*
* OMAP Dual-Mode Timers
*
* Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
* Tarun Kanti DebBarma <[email protected]>
* Thara Gopinath <[email protected]>
*
* dmtimer adaptation to platform_driver.
*
* Copyright (C) 2005 Nokia Corporation
* OMAP2 support by Juha Yrjola
* API improvements and OMAP2 clock framework support by Timo Teras
*
* Copyright (C) 2009 Texas Instruments
* Added OMAP4 support - Santosh Shilimkar <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dmtimer-omap.h>
#include <clocksource/timer-ti-dm.h>
/*
* timer errata flags
*
* Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
* errata prevents us from using posted mode on these devices, unless the
* timer counter register is never read. For more details please refer to
* the OMAP3/4/5 errata documents.
*/
#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
/* posted mode types */
#define OMAP_TIMER_NONPOSTED 0x00
#define OMAP_TIMER_POSTED 0x01
/* register offsets with the write pending bit encoded */
#define WPSHIFT 16
#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
| (WP_NONE << WPSHIFT))
#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
| (WP_TCLR << WPSHIFT))
#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
| (WP_TCRR << WPSHIFT))
#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
| (WP_TLDR << WPSHIFT))
#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
| (WP_TTGR << WPSHIFT))
#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
| (WP_NONE << WPSHIFT))
#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
| (WP_TMAR << WPSHIFT))
#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
| (WP_NONE << WPSHIFT))
#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
| (WP_NONE << WPSHIFT))
#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
| (WP_NONE << WPSHIFT))
#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
| (WP_TPIR << WPSHIFT))
#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
| (WP_TNIR << WPSHIFT))
#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
| (WP_TCVR << WPSHIFT))
#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
(_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
(_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
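/*
 * Illustrative decode: each OMAP_TIMER_*_REG constant packs the register
 * offset in its low byte and the matching write-pending flag above WPSHIFT
 * (bit 16). OMAP_TIMER_CTRL_REG, for example, carries _OMAP_TIMER_CTRL_OFFSET
 * in bits 7:0 and WP_TCLR in the upper half-word; dmtimer_read() and
 * dmtimer_write() below split them apart again with "reg >> WPSHIFT" and
 * "reg & 0xff".
 */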
struct timer_regs {
u32 ocp_cfg;
u32 tidr;
u32 tier;
u32 twer;
u32 tclr;
u32 tcrr;
u32 tldr;
u32 ttrg;
u32 twps;
u32 tmar;
u32 tcar1;
u32 tsicr;
u32 tcar2;
u32 tpir;
u32 tnir;
u32 tcvr;
u32 tocr;
u32 towr;
};
struct dmtimer {
struct omap_dm_timer cookie;
int id;
int irq;
struct clk *fclk;
void __iomem *io_base;
int irq_stat; /* TISR/IRQSTATUS interrupt status */
int irq_ena; /* irq enable */
int irq_dis; /* irq disable, only on v2 ip */
void __iomem *pend; /* write pending */
void __iomem *func_base; /* function register base */
atomic_t enabled;
unsigned long rate;
unsigned reserved:1;
unsigned posted:1;
unsigned omap1:1;
struct timer_regs context;
int revision;
u32 capability;
u32 errata;
struct platform_device *pdev;
struct list_head node;
struct notifier_block nb;
};
static u32 omap_reserved_systimers;
static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);
enum {
REQUEST_ANY = 0,
REQUEST_BY_ID,
REQUEST_BY_CAP,
REQUEST_BY_NODE,
};
/**
* dmtimer_read - read timer registers in posted and non-posted mode
* @timer: timer pointer on which the read operation is performed
* @reg: lowest byte holds the register offset
*
* The posted mode bit is encoded in reg. Note that in posted mode, the write
* pending bit must be checked. Otherwise a read of a non-completed write
* will produce an error.
*/
static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
{
u16 wp, offset;
wp = reg >> WPSHIFT;
offset = reg & 0xff;
/* Wait for a possible write pending bit in posted mode */
if (wp && timer->posted)
while (readl_relaxed(timer->pend) & wp)
cpu_relax();
return readl_relaxed(timer->func_base + offset);
}
/**
* dmtimer_write - write timer registers in posted and non-posted mode
* @timer: timer pointer on which the write operation is to be performed
* @reg: lowest byte holds the register offset
* @value: data to write into the register
*
* The posted mode bit is encoded in reg. Note that in posted mode, the write
* pending bit must be checked. Otherwise a write on a register which has a
* pending write will be lost.
*/
static inline void dmtimer_write(struct dmtimer *timer, u32 reg, u32 val)
{
u16 wp, offset;
wp = reg >> WPSHIFT;
offset = reg & 0xff;
/* Wait for a possible write pending bit in posted mode */
if (wp && timer->posted)
while (readl_relaxed(timer->pend) & wp)
cpu_relax();
writel_relaxed(val, timer->func_base + offset);
}
static inline void __omap_dm_timer_init_regs(struct dmtimer *timer)
{
u32 tidr;
/* Assume v1 ip if bits [31:16] are zero */
tidr = readl_relaxed(timer->io_base);
if (!(tidr >> 16)) {
timer->revision = 1;
timer->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
timer->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
timer->irq_dis = OMAP_TIMER_V1_INT_EN_OFFSET;
timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
timer->func_base = timer->io_base;
} else {
timer->revision = 2;
timer->irq_stat = OMAP_TIMER_V2_IRQSTATUS - OMAP_TIMER_V2_FUNC_OFFSET;
timer->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET - OMAP_TIMER_V2_FUNC_OFFSET;
timer->irq_dis = OMAP_TIMER_V2_IRQENABLE_CLR - OMAP_TIMER_V2_FUNC_OFFSET;
timer->pend = timer->io_base +
_OMAP_TIMER_WRITE_PEND_OFFSET +
OMAP_TIMER_V2_FUNC_OFFSET;
timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
}
}
/*
* __omap_dm_timer_enable_posted - enables write posted mode
* @timer: pointer to timer instance handle
*
* Enables the write posted mode for the timer. When posted mode is enabled,
* writes to certain timer registers are immediately acknowledged by the
* internal bus, which prevents the CPU from stalling while it waits for the
* write to complete. Enabling this feature can improve performance when
* writing to the timer registers.
*/
static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
{
if (timer->posted)
return;
if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
timer->posted = OMAP_TIMER_NONPOSTED;
dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0);
return;
}
dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED);
timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
timer->posted = OMAP_TIMER_POSTED;
}
static inline void __omap_dm_timer_stop(struct dmtimer *timer,
unsigned long rate)
{
u32 l;
l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
l &= ~0x1;
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
#ifdef CONFIG_ARCH_OMAP2PLUS
/* Readback to make sure write has completed */
dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
/*
* Wait for functional clock period x 3.5 to make sure that
* timer is stopped
*/
udelay(3500000 / rate + 1);
#endif
}
/* Ack possibly pending interrupt */
dmtimer_write(timer, timer->irq_stat, OMAP_TIMER_INT_OVERFLOW);
}
static inline void __omap_dm_timer_int_enable(struct dmtimer *timer,
unsigned int value)
{
dmtimer_write(timer, timer->irq_ena, value);
dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
}
static inline unsigned int
__omap_dm_timer_read_counter(struct dmtimer *timer)
{
return dmtimer_read(timer, OMAP_TIMER_COUNTER_REG);
}
static inline void __omap_dm_timer_write_status(struct dmtimer *timer,
unsigned int value)
{
dmtimer_write(timer, timer->irq_stat, value);
}
static void omap_timer_restore_context(struct dmtimer *timer)
{
dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, timer->context.ocp_cfg);
dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, timer->context.twer);
dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, timer->context.tcrr);
dmtimer_write(timer, OMAP_TIMER_LOAD_REG, timer->context.tldr);
dmtimer_write(timer, OMAP_TIMER_MATCH_REG, timer->context.tmar);
dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, timer->context.tsicr);
dmtimer_write(timer, timer->irq_ena, timer->context.tier);
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, timer->context.tclr);
}
static void omap_timer_save_context(struct dmtimer *timer)
{
timer->context.ocp_cfg = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
timer->context.tclr = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
timer->context.twer = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG);
timer->context.tldr = dmtimer_read(timer, OMAP_TIMER_LOAD_REG);
timer->context.tmar = dmtimer_read(timer, OMAP_TIMER_MATCH_REG);
timer->context.tier = dmtimer_read(timer, timer->irq_ena);
timer->context.tsicr = dmtimer_read(timer, OMAP_TIMER_IF_CTRL_REG);
}
static int omap_timer_context_notifier(struct notifier_block *nb,
unsigned long cmd, void *v)
{
struct dmtimer *timer;
timer = container_of(nb, struct dmtimer, nb);
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
if ((timer->capability & OMAP_TIMER_ALWON) ||
!atomic_read(&timer->enabled))
break;
omap_timer_save_context(timer);
break;
case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
break;
case CPU_CLUSTER_PM_EXIT:
if ((timer->capability & OMAP_TIMER_ALWON) ||
!atomic_read(&timer->enabled))
break;
omap_timer_restore_context(timer);
break;
}
return NOTIFY_OK;
}
static int omap_dm_timer_reset(struct dmtimer *timer)
{
u32 l, timeout = 100000;
if (timer->revision != 1)
return -EINVAL;
dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
do {
l = dmtimer_read(timer, OMAP_TIMER_V1_SYS_STAT_OFFSET);
} while (!l && timeout--);
if (!timeout) {
dev_err(&timer->pdev->dev, "Timer failed to reset\n");
return -ETIMEDOUT;
}
/* Configure timer for smart-idle mode */
l = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
l |= 0x2 << 0x3;
dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l);
timer->posted = 0;
return 0;
}
/*
* Functions exposed to PWM and remoteproc drivers via platform_data.
* Do not use these in the driver; they will be deprecated and replaced
* by generic Linux framework functions such as chained interrupts and
* the clock framework.
*/
static struct dmtimer *to_dmtimer(struct omap_dm_timer *cookie)
{
if (!cookie)
return NULL;
return container_of(cookie, struct dmtimer, cookie);
}
static int omap_dm_timer_set_source(struct omap_dm_timer *cookie, int source)
{
int ret;
const char *parent_name;
struct clk *parent;
struct dmtimer_platform_data *pdata;
struct dmtimer *timer;
timer = to_dmtimer(cookie);
if (unlikely(!timer) || IS_ERR(timer->fclk))
return -EINVAL;
switch (source) {
case OMAP_TIMER_SRC_SYS_CLK:
parent_name = "timer_sys_ck";
break;
case OMAP_TIMER_SRC_32_KHZ:
parent_name = "timer_32k_ck";
break;
case OMAP_TIMER_SRC_EXT_CLK:
parent_name = "timer_ext_ck";
break;
default:
return -EINVAL;
}
pdata = timer->pdev->dev.platform_data;
/*
* FIXME: Used for OMAP1 devices only because they do not currently
* use the clock framework to set the parent clock. To be removed
* once OMAP1 has migrated to using the clock framework for dmtimers.
*/
if (timer->omap1 && pdata && pdata->set_timer_src)
return pdata->set_timer_src(timer->pdev, source);
#if defined(CONFIG_COMMON_CLK)
/* Check if the clock has configurable parents */
if (clk_hw_get_num_parents(__clk_get_hw(timer->fclk)) < 2)
return 0;
#endif
parent = clk_get(&timer->pdev->dev, parent_name);
if (IS_ERR(parent)) {
pr_err("%s: %s not found\n", __func__, parent_name);
return -EINVAL;
}
ret = clk_set_parent(timer->fclk, parent);
if (ret < 0)
pr_err("%s: failed to set %s as parent\n", __func__,
parent_name);
clk_put(parent);
return ret;
}
static void omap_dm_timer_enable(struct omap_dm_timer *cookie)
{
struct dmtimer *timer = to_dmtimer(cookie);
struct device *dev = &timer->pdev->dev;
int rc;
rc = pm_runtime_resume_and_get(dev);
if (rc)
dev_err(dev, "could not enable timer\n");
}
static void omap_dm_timer_disable(struct omap_dm_timer *cookie)
{
struct dmtimer *timer = to_dmtimer(cookie);
struct device *dev = &timer->pdev->dev;
pm_runtime_put_sync(dev);
}
static int omap_dm_timer_prepare(struct dmtimer *timer)
{
struct device *dev = &timer->pdev->dev;
int rc;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
rc = omap_dm_timer_reset(timer);
if (rc) {
pm_runtime_put_sync(dev);
return rc;
}
}
__omap_dm_timer_enable_posted(timer);
pm_runtime_put_sync(dev);
return 0;
}
static inline u32 omap_dm_timer_reserved_systimer(int id)
{
return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
}
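/*
 * Illustrative example: reserved system timers are tracked as a bitmask
 * indexed from timer id 1, so omap_dm_timer_reserved_systimer(1) tests
 * bit 0 of omap_reserved_systimers and id 12 tests bit 11.
 */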
static struct dmtimer *_omap_dm_timer_request(int req_type, void *data)
{
struct dmtimer *timer = NULL, *t;
struct device_node *np = NULL;
unsigned long flags;
u32 cap = 0;
int id = 0;
switch (req_type) {
case REQUEST_BY_ID:
id = *(int *)data;
break;
case REQUEST_BY_CAP:
cap = *(u32 *)data;
break;
case REQUEST_BY_NODE:
np = (struct device_node *)data;
break;
default:
/* REQUEST_ANY */
break;
}
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(t, &omap_timer_list, node) {
if (t->reserved)
continue;
switch (req_type) {
case REQUEST_BY_ID:
if (id == t->pdev->id) {
timer = t;
timer->reserved = 1;
goto found;
}
break;
case REQUEST_BY_CAP:
if (cap == (t->capability & cap)) {
/*
* If timer is not NULL, we have already found
* one timer. But it was not an exact match
* because it had more capabilities than what
* was required. Therefore, unreserve the last
* timer found and see if this one is a better
* match.
*/
if (timer)
timer->reserved = 0;
timer = t;
timer->reserved = 1;
/* Exit loop early if we find an exact match */
if (t->capability == cap)
goto found;
}
break;
case REQUEST_BY_NODE:
if (np == t->pdev->dev.of_node) {
timer = t;
timer->reserved = 1;
goto found;
}
break;
default:
/* REQUEST_ANY */
timer = t;
timer->reserved = 1;
goto found;
}
}
found:
spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer && omap_dm_timer_prepare(timer)) {
timer->reserved = 0;
timer = NULL;
}
if (!timer)
pr_debug("%s: timer request failed!\n", __func__);
return timer;
}
static struct omap_dm_timer *omap_dm_timer_request(void)
{
struct dmtimer *timer;
timer = _omap_dm_timer_request(REQUEST_ANY, NULL);
if (!timer)
return NULL;
return &timer->cookie;
}
static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
struct dmtimer *timer;
/* Requesting timer by ID is not supported when device tree is used */
if (of_have_populated_dt()) {
pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
__func__);
return NULL;
}
timer = _omap_dm_timer_request(REQUEST_BY_ID, &id);
if (!timer)
return NULL;
return &timer->cookie;
}
/**
* omap_dm_timer_request_by_node - Request a timer by device-tree node
* @np: Pointer to device-tree timer node
*
* Request a timer based upon a device node pointer. Returns pointer to
* timer handle on success and a NULL pointer on failure.
*/
static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
{
struct dmtimer *timer;
if (!np)
return NULL;
timer = _omap_dm_timer_request(REQUEST_BY_NODE, np);
if (!timer)
return NULL;
return &timer->cookie;
}
static int omap_dm_timer_free(struct omap_dm_timer *cookie)
{
struct dmtimer *timer;
struct device *dev;
int rc;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
WARN_ON(!timer->reserved);
timer->reserved = 0;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
/* Clear timer configuration */
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, 0);
pm_runtime_put_sync(dev);
return 0;
}
static int omap_dm_timer_get_irq(struct omap_dm_timer *cookie)
{
struct dmtimer *timer = to_dmtimer(cookie);
if (timer)
return timer->irq;
return -EINVAL;
}
#if defined(CONFIG_ARCH_OMAP1)
#include <linux/soc/ti/omap1-io.h>
static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
return NULL;
}
/**
* omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
* @inputmask: current value of idlect mask
*/
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
int i = 0;
struct dmtimer *timer = NULL;
unsigned long flags;
/* If ARMXOR cannot be idled this function call is unnecessary */
if (!(inputmask & (1 << 1)))
return inputmask;
/* If any active timer is using ARMXOR return modified mask */
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(timer, &omap_timer_list, node) {
u32 l;
l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
else
inputmask &= ~(1 << 2);
}
i++;
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
return inputmask;
}
#else
static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
struct dmtimer *timer = to_dmtimer(cookie);
if (timer && !IS_ERR(timer->fclk))
return timer->fclk;
return NULL;
}
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
BUG();
return 0;
}
#endif
static int omap_dm_timer_start(struct omap_dm_timer *cookie)
{
struct dmtimer *timer;
struct device *dev;
int rc;
u32 l;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (!(l & OMAP_TIMER_CTRL_ST)) {
l |= OMAP_TIMER_CTRL_ST;
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
}
return 0;
}
static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
{
struct dmtimer *timer;
struct device *dev;
unsigned long rate = 0;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
if (!timer->omap1)
rate = clk_get_rate(timer->fclk);
__omap_dm_timer_stop(timer, rate);
pm_runtime_put_sync(dev);
return 0;
}
static int omap_dm_timer_set_load(struct omap_dm_timer *cookie,
unsigned int load)
{
struct dmtimer *timer;
struct device *dev;
int rc;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
dmtimer_write(timer, OMAP_TIMER_LOAD_REG, load);
pm_runtime_put_sync(dev);
return 0;
}
static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
unsigned int match)
{
struct dmtimer *timer;
struct device *dev;
int rc;
u32 l;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (enable)
l |= OMAP_TIMER_CTRL_CE;
else
l &= ~OMAP_TIMER_CTRL_CE;
dmtimer_write(timer, OMAP_TIMER_MATCH_REG, match);
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
pm_runtime_put_sync(dev);
return 0;
}
static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
struct dmtimer *timer;
struct device *dev;
int rc;
u32 l;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
if (def_on)
l |= OMAP_TIMER_CTRL_SCPWM;
if (toggle)
l |= OMAP_TIMER_CTRL_PT;
l |= trigger << 10;
if (autoreload)
l |= OMAP_TIMER_CTRL_AR;
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
pm_runtime_put_sync(dev);
return 0;
}
static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *cookie)
{
struct dmtimer *timer;
struct device *dev;
int rc;
u32 l;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
pm_runtime_put_sync(dev);
return l;
}
static int omap_dm_timer_set_prescaler(struct omap_dm_timer *cookie,
int prescaler)
{
struct dmtimer *timer;
struct device *dev;
int rc;
u32 l;
timer = to_dmtimer(cookie);
if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
if (prescaler >= 0) {
l |= OMAP_TIMER_CTRL_PRE;
l |= prescaler << 2;
}
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
pm_runtime_put_sync(dev);
return 0;
}
static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
unsigned int value)
{
struct dmtimer *timer;
struct device *dev;
int rc;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
__omap_dm_timer_int_enable(timer, value);
pm_runtime_put_sync(dev);
return 0;
}
/**
* omap_dm_timer_set_int_disable - disable timer interrupts
* @timer: pointer to timer handle
* @mask: bit mask of interrupts to be disabled
*
* Disables the specified timer interrupts for a timer.
*/
static int omap_dm_timer_set_int_disable(struct omap_dm_timer *cookie, u32 mask)
{
struct dmtimer *timer;
struct device *dev;
u32 l = mask;
int rc;
timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
dev = &timer->pdev->dev;
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
if (timer->revision == 1)
l = dmtimer_read(timer, timer->irq_ena) & ~mask;
dmtimer_write(timer, timer->irq_dis, l);
l = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
pm_runtime_put_sync(dev);
return 0;
}
static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *cookie)
{
struct dmtimer *timer;
unsigned int l;
timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return 0;
}
l = dmtimer_read(timer, timer->irq_stat);
return l;
}
static int omap_dm_timer_write_status(struct omap_dm_timer *cookie, unsigned int value)
{
struct dmtimer *timer;
timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled)))
return -EINVAL;
__omap_dm_timer_write_status(timer, value);
return 0;
}
static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
{
struct dmtimer *timer;
timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not iavailable or enabled.\n", __func__);
return 0;
}
return __omap_dm_timer_read_counter(timer);
}
static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
struct dmtimer *timer;
timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
struct dmtimer *timer = dev_get_drvdata(dev);
atomic_set(&timer->enabled, 0);
if (timer->capability & OMAP_TIMER_ALWON || !timer->func_base)
return 0;
omap_timer_save_context(timer);
return 0;
}
static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
{
struct dmtimer *timer = dev_get_drvdata(dev);
if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
omap_timer_restore_context(timer);
atomic_set(&timer->enabled, 1);
return 0;
}
static const struct dev_pm_ops omap_dm_timer_pm_ops = {
SET_RUNTIME_PM_OPS(omap_dm_timer_runtime_suspend,
omap_dm_timer_runtime_resume, NULL)
};
static const struct of_device_id omap_timer_match[];
/**
* omap_dm_timer_probe - probe function called for every registered device
* @pdev: pointer to current timer platform device
*
* Called by driver framework at the end of device registration for all
* timer devices.
*/
static int omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
struct dmtimer *timer;
struct device *dev = &pdev->dev;
const struct dmtimer_platform_data *pdata;
int ret;
pdata = of_device_get_match_data(dev);
if (!pdata)
pdata = dev_get_platdata(dev);
else
dev->platform_data = (void *)pdata;
if (!pdata) {
dev_err(dev, "%s: no platform data.\n", __func__);
return -ENODEV;
}
timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->irq = platform_get_irq(pdev, 0);
if (timer->irq < 0)
return timer->irq;
timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
platform_set_drvdata(pdev, timer);
if (dev->of_node) {
if (of_property_read_bool(dev->of_node, "ti,timer-alwon"))
timer->capability |= OMAP_TIMER_ALWON;
if (of_property_read_bool(dev->of_node, "ti,timer-dsp"))
timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
if (of_property_read_bool(dev->of_node, "ti,timer-pwm"))
timer->capability |= OMAP_TIMER_HAS_PWM;
if (of_property_read_bool(dev->of_node, "ti,timer-secure"))
timer->capability |= OMAP_TIMER_SECURE;
} else {
timer->id = pdev->id;
timer->capability = pdata->timer_capability;
timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
}
timer->omap1 = timer->capability & OMAP_TIMER_NEEDS_RESET;
/* OMAP1 devices do not yet use the clock framework for dmtimers */
if (!timer->omap1) {
timer->fclk = devm_clk_get(dev, "fck");
if (IS_ERR(timer->fclk))
return PTR_ERR(timer->fclk);
} else {
timer->fclk = ERR_PTR(-ENODEV);
}
if (!(timer->capability & OMAP_TIMER_ALWON)) {
timer->nb.notifier_call = omap_timer_context_notifier;
cpu_pm_register_notifier(&timer->nb);
}
timer->errata = pdata->timer_errata;
timer->pdev = pdev;
pm_runtime_enable(dev);
if (!timer->reserved) {
ret = pm_runtime_resume_and_get(dev);
if (ret) {
dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
__func__);
goto err_disable;
}
__omap_dm_timer_init_regs(timer);
/* Clear timer configuration */
dmtimer_write(timer, OMAP_TIMER_CTRL_REG, 0);
pm_runtime_put(dev);
}
/* add the timer element to the list */
spin_lock_irqsave(&dm_timer_lock, flags);
list_add_tail(&timer->node, &omap_timer_list);
spin_unlock_irqrestore(&dm_timer_lock, flags);
dev_dbg(dev, "Device Probed.\n");
return 0;
err_disable:
pm_runtime_disable(dev);
return ret;
}
/**
* omap_dm_timer_remove - cleanup a registered timer device
* @pdev: pointer to current timer platform device
*
* Called by driver framework whenever a timer device is unregistered.
* In addition to freeing platform resources it also deletes the timer
* entry from the local list.
*/
static void omap_dm_timer_remove(struct platform_device *pdev)
{
struct dmtimer *timer;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dm_timer_lock, flags);
list_for_each_entry(timer, &omap_timer_list, node)
if (!strcmp(dev_name(&timer->pdev->dev),
dev_name(&pdev->dev))) {
if (!(timer->capability & OMAP_TIMER_ALWON))
cpu_pm_unregister_notifier(&timer->nb);
list_del(&timer->node);
ret = 0;
break;
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
pm_runtime_disable(&pdev->dev);
if (ret)
dev_err(&pdev->dev, "Unable to determine timer entry in list of drivers on remove\n");
}
static const struct omap_dm_timer_ops dmtimer_ops = {
.request_by_node = omap_dm_timer_request_by_node,
.request_specific = omap_dm_timer_request_specific,
.request = omap_dm_timer_request,
.set_source = omap_dm_timer_set_source,
.get_irq = omap_dm_timer_get_irq,
.set_int_enable = omap_dm_timer_set_int_enable,
.set_int_disable = omap_dm_timer_set_int_disable,
.free = omap_dm_timer_free,
.enable = omap_dm_timer_enable,
.disable = omap_dm_timer_disable,
.get_fclk = omap_dm_timer_get_fclk,
.start = omap_dm_timer_start,
.stop = omap_dm_timer_stop,
.set_load = omap_dm_timer_set_load,
.set_match = omap_dm_timer_set_match,
.set_pwm = omap_dm_timer_set_pwm,
.get_pwm_status = omap_dm_timer_get_pwm_status,
.set_prescaler = omap_dm_timer_set_prescaler,
.read_counter = omap_dm_timer_read_counter,
.write_counter = omap_dm_timer_write_counter,
.read_status = omap_dm_timer_read_status,
.write_status = omap_dm_timer_write_status,
};
static const struct dmtimer_platform_data omap3plus_pdata = {
.timer_errata = OMAP_TIMER_ERRATA_I103_I767,
.timer_ops = &dmtimer_ops,
};
static const struct dmtimer_platform_data am6_pdata = {
.timer_ops = &dmtimer_ops,
};
static const struct of_device_id omap_timer_match[] = {
{
.compatible = "ti,omap2420-timer",
},
{
.compatible = "ti,omap3430-timer",
.data = &omap3plus_pdata,
},
{
.compatible = "ti,omap4430-timer",
.data = &omap3plus_pdata,
},
{
.compatible = "ti,omap5430-timer",
.data = &omap3plus_pdata,
},
{
.compatible = "ti,am335x-timer",
.data = &omap3plus_pdata,
},
{
.compatible = "ti,am335x-timer-1ms",
.data = &omap3plus_pdata,
},
{
.compatible = "ti,dm816-timer",
.data = &omap3plus_pdata,
},
{
.compatible = "ti,am654-timer",
.data = &am6_pdata,
},
{},
};
MODULE_DEVICE_TABLE(of, omap_timer_match);
static struct platform_driver omap_dm_timer_driver = {
.probe = omap_dm_timer_probe,
.remove_new = omap_dm_timer_remove,
.driver = {
.name = "omap_timer",
.of_match_table = omap_timer_match,
.pm = &omap_dm_timer_pm_ops,
},
};
module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_AUTHOR("Texas Instruments Inc");
|
linux-master
|
drivers/clocksource/timer-ti-dm.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 Google, Inc.
*
* Author:
* Colin Cross <[email protected]>
*/
#define pr_fmt(fmt) "tegra-timer: " fmt
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <linux/time.h>
#include "timer-of.h"
#define RTC_SECONDS 0x08
#define RTC_SHADOW_SECONDS 0x0c
#define RTC_MILLISECONDS 0x10
#define TIMERUS_CNTR_1US 0x10
#define TIMERUS_USEC_CFG 0x14
#define TIMERUS_CNTR_FREEZE 0x4c
#define TIMER_PTV 0x0
#define TIMER_PTV_EN BIT(31)
#define TIMER_PTV_PER BIT(30)
#define TIMER_PCR 0x4
#define TIMER_PCR_INTR_CLR BIT(30)
#define TIMER1_BASE 0x00
#define TIMER2_BASE 0x08
#define TIMER3_BASE 0x50
#define TIMER4_BASE 0x58
#define TIMER10_BASE 0x90
#define TIMER1_IRQ_IDX 0
#define TIMER10_IRQ_IDX 10
#define TIMER_1MHz 1000000
static u32 usec_config;
static void __iomem *timer_reg_base;
static int tegra_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
/*
* Tegra's timer uses n+1 scheme for the counter, i.e. timer will
* fire after one tick if 0 is loaded.
*
* The minimum and maximum numbers of oneshot ticks are defined
* by clockevents_config_and_register(1, 0x1fffffff + 1) invocation
* below in the code. Hence the cycles (ticks) can't be outside of
* a range supportable by hardware.
*/
writel_relaxed(TIMER_PTV_EN | (cycles - 1), reg_base + TIMER_PTV);
return 0;
}
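/*
 * Worked example of the n+1 scheme described above: cycles == 1 writes
 * TIMER_PTV_EN | 0, firing after a single tick, while the registered
 * maximum of 0x1fffffff + 1 cycles writes the 29-bit value 0x1fffffff.
 */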
static int tegra_timer_shutdown(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel_relaxed(0, reg_base + TIMER_PTV);
return 0;
}
static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
unsigned long period = timer_of_period(to_timer_of(evt));
writel_relaxed(TIMER_PTV_EN | TIMER_PTV_PER | (period - 1),
reg_base + TIMER_PTV);
return 0;
}
static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void tegra_timer_suspend(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
}
static void tegra_timer_resume(struct clock_event_device *evt)
{
writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
}
static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
.flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
.clkevt = {
.name = "tegra_timer",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.set_next_event = tegra_timer_set_next_event,
.set_state_shutdown = tegra_timer_shutdown,
.set_state_periodic = tegra_timer_set_periodic,
.set_state_oneshot = tegra_timer_shutdown,
.tick_resume = tegra_timer_shutdown,
.suspend = tegra_timer_suspend,
.resume = tegra_timer_resume,
},
};
static int tegra_timer_setup(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
writel_relaxed(0, timer_of_base(to) + TIMER_PTV);
writel_relaxed(TIMER_PCR_INTR_CLR, timer_of_base(to) + TIMER_PCR);
irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
enable_irq(to->clkevt.irq);
/*
* Tegra's timer uses n+1 scheme for the counter, i.e. timer will
* fire after one tick if 0 is loaded and thus minimum number of
* ticks is 1. As a result, both tick limits registered here are one
* higher than the minimum and maximum values the hardware register can
* take; this offset is then accounted for by the set_next_event()
* callback.
*/
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
1, /* min */
0x1fffffff + 1); /* max 29 bits + 1 */
return 0;
}
static int tegra_timer_stop(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
to->clkevt.set_state_shutdown(&to->clkevt);
disable_irq_nosync(to->clkevt.irq);
return 0;
}
static u64 notrace tegra_read_sched_clock(void)
{
return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}
#ifdef CONFIG_ARM
static unsigned long tegra_delay_timer_read_counter_long(void)
{
return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}
static struct delay_timer tegra_delay_timer = {
.read_current_timer = tegra_delay_timer_read_counter_long,
.freq = TIMER_1MHz,
};
#endif
static struct timer_of suspend_rtc_to = {
.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};
/*
* tegra_rtc_read - Reads the Tegra RTC registers
* Care must be taken that this function is not called while the
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
void __iomem *reg_base = timer_of_base(&suspend_rtc_to);
u32 ms = readl_relaxed(reg_base + RTC_MILLISECONDS);
u32 s = readl_relaxed(reg_base + RTC_SHADOW_SECONDS);
return (u64)s * MSEC_PER_SEC + ms;
}
static struct clocksource suspend_rtc_clocksource = {
.name = "tegra_suspend_timer",
.rating = 200,
.read = tegra_rtc_read_ms,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
{
if (tegra20) {
switch (cpu) {
case 0:
return TIMER1_BASE;
case 1:
return TIMER2_BASE;
case 2:
return TIMER3_BASE;
default:
return TIMER4_BASE;
}
}
return TIMER10_BASE + cpu * 8;
}
static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
{
if (tegra20)
return TIMER1_IRQ_IDX + cpu;
return TIMER10_IRQ_IDX + cpu;
}
static inline unsigned long tegra_rate_for_timer(struct timer_of *to,
bool tegra20)
{
	/*
	 * TIMER1-9 are fixed at 1 MHz, while TIMER10-13 run off the
	 * parent clock.
	 */
if (tegra20)
return TIMER_1MHz;
return timer_of_rate(to);
}
static int __init tegra_init_timer(struct device_node *np, bool tegra20,
int rating)
{
struct timer_of *to;
int cpu, ret;
to = this_cpu_ptr(&tegra_to);
ret = timer_of_init(np, to);
if (ret)
goto out;
timer_reg_base = timer_of_base(to);
/*
* Configure microsecond timers to have 1MHz clock
* Config register is 0xqqww, where qq is "dividend", ww is "divisor"
* Uses n+1 scheme
*/
switch (timer_of_rate(to)) {
case 12000000:
usec_config = 0x000b; /* (11+1)/(0+1) */
break;
case 12800000:
usec_config = 0x043f; /* (63+1)/(4+1) */
break;
case 13000000:
usec_config = 0x000c; /* (12+1)/(0+1) */
break;
case 16800000:
usec_config = 0x0453; /* (83+1)/(4+1) */
break;
case 19200000:
usec_config = 0x045f; /* (95+1)/(4+1) */
break;
case 26000000:
usec_config = 0x0019; /* (25+1)/(0+1) */
break;
case 38400000:
usec_config = 0x04bf; /* (191+1)/(4+1) */
break;
case 48000000:
usec_config = 0x002f; /* (47+1)/(0+1) */
break;
default:
ret = -EINVAL;
goto out;
}
writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
for_each_possible_cpu(cpu) {
struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
unsigned long flags = IRQF_TIMER | IRQF_NOBALANCING;
unsigned long rate = tegra_rate_for_timer(to, tegra20);
unsigned int base = tegra_base_for_cpu(cpu, tegra20);
unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
unsigned int irq = irq_of_parse_and_map(np, idx);
if (!irq) {
pr_err("failed to map irq for cpu%d\n", cpu);
ret = -EINVAL;
goto out_irq;
}
cpu_to->clkevt.irq = irq;
cpu_to->clkevt.rating = rating;
cpu_to->clkevt.cpumask = cpumask_of(cpu);
cpu_to->of_base.base = timer_reg_base + base;
cpu_to->of_clk.period = rate / HZ;
cpu_to->of_clk.rate = rate;
irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr, flags,
cpu_to->clkevt.name, &cpu_to->clkevt);
if (ret) {
pr_err("failed to set up irq for cpu%d: %d\n",
cpu, ret);
irq_dispose_mapping(cpu_to->clkevt.irq);
cpu_to->clkevt.irq = 0;
goto out_irq;
}
}
sched_clock_register(tegra_read_sched_clock, 32, TIMER_1MHz);
ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", TIMER_1MHz, 300, 32,
clocksource_mmio_readl_up);
if (ret)
pr_err("failed to register clocksource: %d\n", ret);
#ifdef CONFIG_ARM
register_current_timer_delay(&tegra_delay_timer);
#endif
ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
tegra_timer_stop);
if (ret)
pr_err("failed to set up cpu hp state: %d\n", ret);
return ret;
out_irq:
for_each_possible_cpu(cpu) {
struct timer_of *cpu_to;
cpu_to = per_cpu_ptr(&tegra_to, cpu);
if (cpu_to->clkevt.irq) {
free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
irq_dispose_mapping(cpu_to->clkevt.irq);
}
}
to->of_base.base = timer_reg_base;
out:
timer_of_cleanup(to);
return ret;
}
static int __init tegra210_init_timer(struct device_node *np)
{
	/*
	 * The arch timer can't survive a power cycle of the CPU core or the
	 * CPUPORESET signal due to a system design shortcoming, hence the
	 * Tegra timer is preferable on Tegra210.
	 */
return tegra_init_timer(np, false, 460);
}
TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra210_init_timer);
static int __init tegra20_init_timer(struct device_node *np)
{
int rating;
	/*
	 * Tegra20 and Tegra30 have Cortex-A9 CPUs with a TWD timer. That
	 * timer runs off the CPU clock and is therefore subject to jitter
	 * caused by DVFS clock rate changes, so the Tegra timer is
	 * preferable on these older SoCs. Later SoC generations use the
	 * arch timer as the main per-CPU timer, which is not affected by
	 * DVFS changes.
	 */
if (of_machine_is_compatible("nvidia,tegra20") ||
of_machine_is_compatible("nvidia,tegra30"))
rating = 460;
else
rating = 330;
return tegra_init_timer(np, true, rating);
}
TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
static int __init tegra20_init_rtc(struct device_node *np)
{
int ret;
ret = timer_of_init(np, &suspend_rtc_to);
if (ret)
return ret;
return clocksource_register_hz(&suspend_rtc_clocksource, 1000);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
|
linux-master
|
drivers/clocksource/timer-tegra.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Clocksource using the Low Power Timer found in the Low Power Controller (LPC)
*
* Copyright (C) 2015 STMicroelectronics – All Rights Reserved
*
* Author(s): Francesco Virlinzi <[email protected]>
* Ajit Pal Singh <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <dt-bindings/mfd/st-lpc.h>
/* Low Power Timer */
#define LPC_LPT_LSB_OFF 0x400
#define LPC_LPT_MSB_OFF 0x404
#define LPC_LPT_START_OFF 0x408
static struct st_clksrc_ddata {
struct clk *clk;
void __iomem *base;
} ddata;
static void __init st_clksrc_reset(void)
{
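	/* Stop the LPT, clear both halves of the counter, then start it again. */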
writel_relaxed(0, ddata.base + LPC_LPT_START_OFF);
writel_relaxed(0, ddata.base + LPC_LPT_MSB_OFF);
writel_relaxed(0, ddata.base + LPC_LPT_LSB_OFF);
writel_relaxed(1, ddata.base + LPC_LPT_START_OFF);
}
static u64 notrace st_clksrc_sched_clock_read(void)
{
return (u64)readl_relaxed(ddata.base + LPC_LPT_LSB_OFF);
}
static int __init st_clksrc_init(void)
{
unsigned long rate;
int ret;
st_clksrc_reset();
rate = clk_get_rate(ddata.clk);
sched_clock_register(st_clksrc_sched_clock_read, 32, rate);
ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
"clksrc-st-lpc", rate, 300, 32,
clocksource_mmio_readl_up);
if (ret) {
pr_err("clksrc-st-lpc: Failed to register clocksource\n");
return ret;
}
return 0;
}
static int __init st_clksrc_setup_clk(struct device_node *np)
{
struct clk *clk;
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("clksrc-st-lpc: Failed to get LPC clock\n");
return PTR_ERR(clk);
}
if (clk_prepare_enable(clk)) {
pr_err("clksrc-st-lpc: Failed to enable LPC clock\n");
return -EINVAL;
}
if (!clk_get_rate(clk)) {
pr_err("clksrc-st-lpc: Failed to get LPC clock rate\n");
clk_disable_unprepare(clk);
return -EINVAL;
}
ddata.clk = clk;
return 0;
}
static int __init st_clksrc_of_register(struct device_node *np)
{
int ret;
uint32_t mode;
ret = of_property_read_u32(np, "st,lpc-mode", &mode);
if (ret) {
pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
return ret;
}
/* LPC can either run as a Clocksource or in RTC or WDT mode */
if (mode != ST_LPC_MODE_CLKSRC)
return 0;
ddata.base = of_iomap(np, 0);
if (!ddata.base) {
pr_err("clksrc-st-lpc: Unable to map iomem\n");
return -ENXIO;
}
ret = st_clksrc_setup_clk(np);
if (ret) {
iounmap(ddata.base);
return ret;
}
ret = st_clksrc_init();
if (ret) {
clk_disable_unprepare(ddata.clk);
clk_put(ddata.clk);
iounmap(ddata.base);
return ret;
}
pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
clk_get_rate(ddata.clk));
return ret;
}
TIMER_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
|
linux-master
|
drivers/clocksource/clksrc_st_lpc.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner SoCs hstimer driver.
*
* Copyright (C) 2013 Maxime Ripard
*
* Maxime Ripard <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#define TIMER_IRQ_EN_REG 0x00
#define TIMER_IRQ_EN(val) BIT(val)
#define TIMER_IRQ_ST_REG 0x04
#define TIMER_CTL_REG(val) (0x20 * (val) + 0x10)
#define TIMER_CTL_ENABLE BIT(0)
#define TIMER_CTL_RELOAD BIT(1)
#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4)
#define TIMER_CTL_ONESHOT BIT(7)
#define TIMER_INTVAL_LO_REG(val) (0x20 * (val) + 0x14)
#define TIMER_INTVAL_HI_REG(val) (0x20 * (val) + 0x18)
#define TIMER_CNTVAL_LO_REG(val) (0x20 * (val) + 0x1c)
#define TIMER_CNTVAL_HI_REG(val) (0x20 * (val) + 0x20)
#define TIMER_SYNC_TICKS 3
struct sun5i_timer {
void __iomem *base;
struct clk *clk;
struct notifier_block clk_rate_cb;
u32 ticks_per_jiffy;
struct clocksource clksrc;
struct clock_event_device clkevt;
};
#define nb_to_sun5i_timer(x) \
container_of(x, struct sun5i_timer, clk_rate_cb)
#define clksrc_to_sun5i_timer(x) \
container_of(x, struct sun5i_timer, clksrc)
#define clkevt_to_sun5i_timer(x) \
container_of(x, struct sun5i_timer, clkevt)
/*
 * When we disable a timer, we need to wait at least 2 cycles of the
 * timer source clock. For that we use the clocksource timer, which is
 * already set up, runs at the same frequency as the other timers, and
 * will never be disabled.
 */
static void sun5i_clkevt_sync(struct sun5i_timer *ce)
{
u32 old = readl(ce->base + TIMER_CNTVAL_LO_REG(1));
while ((old - readl(ce->base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
cpu_relax();
}
static void sun5i_clkevt_time_stop(struct sun5i_timer *ce, u8 timer)
{
u32 val = readl(ce->base + TIMER_CTL_REG(timer));
writel(val & ~TIMER_CTL_ENABLE, ce->base + TIMER_CTL_REG(timer));
sun5i_clkevt_sync(ce);
}
static void sun5i_clkevt_time_setup(struct sun5i_timer *ce, u8 timer, u32 delay)
{
writel(delay, ce->base + TIMER_INTVAL_LO_REG(timer));
}
static void sun5i_clkevt_time_start(struct sun5i_timer *ce, u8 timer, bool periodic)
{
u32 val = readl(ce->base + TIMER_CTL_REG(timer));
if (periodic)
val &= ~TIMER_CTL_ONESHOT;
else
val |= TIMER_CTL_ONESHOT;
writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
ce->base + TIMER_CTL_REG(timer));
}
static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt)
{
struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
return 0;
}
static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt)
{
struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
sun5i_clkevt_time_start(ce, 0, false);
return 0;
}
static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt)
{
struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
sun5i_clkevt_time_setup(ce, 0, ce->ticks_per_jiffy);
sun5i_clkevt_time_start(ce, 0, true);
return 0;
}
static int sun5i_clkevt_next_event(unsigned long evt,
struct clock_event_device *clkevt)
{
struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
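	/*
	 * The stop above waits at least TIMER_SYNC_TICKS cycles, and the
	 * requested delta is trimmed by the same amount; the minimum delta
	 * registered with clockevents_config_and_register() is
	 * TIMER_SYNC_TICKS, so this cannot underflow.
	 */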
sun5i_clkevt_time_setup(ce, 0, evt - TIMER_SYNC_TICKS);
sun5i_clkevt_time_start(ce, 0, false);
return 0;
}
static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
{
struct sun5i_timer *ce = dev_id;
writel(0x1, ce->base + TIMER_IRQ_ST_REG);
ce->clkevt.event_handler(&ce->clkevt);
return IRQ_HANDLED;
}
static u64 sun5i_clksrc_read(struct clocksource *clksrc)
{
struct sun5i_timer *cs = clksrc_to_sun5i_timer(clksrc);
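	/*
	 * Timer 1 is set up in sun5i_setup_clocksource() to count down from
	 * ~0, so invert the value to expose an up-counting clocksource.
	 */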
return ~readl(cs->base + TIMER_CNTVAL_LO_REG(1));
}
static int sun5i_rate_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
struct sun5i_timer *cs = nb_to_sun5i_timer(nb);
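	/*
	 * The parent clock may be rescaled at runtime: drop the clocksource
	 * before the rate changes, then re-register it and refresh the
	 * clockevent frequency and ticks_per_jiffy once the new rate is in
	 * effect.
	 */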
switch (event) {
case PRE_RATE_CHANGE:
clocksource_unregister(&cs->clksrc);
break;
case POST_RATE_CHANGE:
clocksource_register_hz(&cs->clksrc, ndata->new_rate);
clockevents_update_freq(&cs->clkevt, ndata->new_rate);
cs->ticks_per_jiffy = DIV_ROUND_UP(ndata->new_rate, HZ);
break;
default:
break;
}
return NOTIFY_DONE;
}
static int sun5i_setup_clocksource(struct platform_device *pdev,
unsigned long rate)
{
struct sun5i_timer *cs = platform_get_drvdata(pdev);
void __iomem *base = cs->base;
int ret;
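	/* Program timer 1 as a free-running, auto-reloading down-counter starting from ~0. */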
writel(~0, base + TIMER_INTVAL_LO_REG(1));
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
cs->clksrc.name = pdev->dev.of_node->name;
cs->clksrc.rating = 340;
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
dev_err(&pdev->dev, "Couldn't register clock source.\n");
return ret;
}
return 0;
}
static int sun5i_setup_clockevent(struct platform_device *pdev,
unsigned long rate, int irq)
{
struct device *dev = &pdev->dev;
struct sun5i_timer *ce = platform_get_drvdata(pdev);
void __iomem *base = ce->base;
int ret;
u32 val;
ce->clkevt.name = dev->of_node->name;
ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->clkevt.set_next_event = sun5i_clkevt_next_event;
ce->clkevt.set_state_shutdown = sun5i_clkevt_shutdown;
ce->clkevt.set_state_periodic = sun5i_clkevt_set_periodic;
ce->clkevt.set_state_oneshot = sun5i_clkevt_set_oneshot;
ce->clkevt.tick_resume = sun5i_clkevt_shutdown;
ce->clkevt.rating = 340;
ce->clkevt.irq = irq;
ce->clkevt.cpumask = cpu_possible_mask;
/* Enable timer0 interrupt */
val = readl(base + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), base + TIMER_IRQ_EN_REG);
clockevents_config_and_register(&ce->clkevt, rate,
TIMER_SYNC_TICKS, 0xffffffff);
ret = devm_request_irq(dev, irq, sun5i_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL,
"sun5i_timer0", ce);
if (ret) {
dev_err(dev, "Unable to register interrupt\n");
return ret;
}
return 0;
}
static int sun5i_timer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sun5i_timer *st;
struct reset_control *rstc;
void __iomem *timer_base;
struct clk *clk;
unsigned long rate;
int irq, ret;
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
platform_set_drvdata(pdev, st);
timer_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer_base)) {
dev_err(dev, "Can't map registers\n");
return PTR_ERR(timer_base);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "Can't get IRQ\n");
return irq;
}
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "Can't get timer clock\n");
return PTR_ERR(clk);
}
rate = clk_get_rate(clk);
if (!rate) {
dev_err(dev, "Couldn't get parent clock rate\n");
return -EINVAL;
}
st->base = timer_base;
st->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
st->clk = clk;
st->clk_rate_cb.notifier_call = sun5i_rate_cb;
st->clk_rate_cb.next = NULL;
ret = devm_clk_notifier_register(dev, clk, &st->clk_rate_cb);
if (ret) {
dev_err(dev, "Unable to register clock notifier.\n");
return ret;
}
rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (!IS_ERR(rstc))
		reset_control_deassert(rstc);
ret = sun5i_setup_clocksource(pdev, rate);
if (ret)
return ret;
ret = sun5i_setup_clockevent(pdev, rate, irq);
if (ret)
goto err_unreg_clocksource;
return 0;
err_unreg_clocksource:
clocksource_unregister(&st->clksrc);
return ret;
}
static void sun5i_timer_remove(struct platform_device *pdev)
{
struct sun5i_timer *st = platform_get_drvdata(pdev);
clocksource_unregister(&st->clksrc);
}
static const struct of_device_id sun5i_timer_of_match[] = {
{ .compatible = "allwinner,sun5i-a13-hstimer" },
{ .compatible = "allwinner,sun7i-a20-hstimer" },
{},
};
MODULE_DEVICE_TABLE(of, sun5i_timer_of_match);
static struct platform_driver sun5i_timer_driver = {
.probe = sun5i_timer_probe,
.remove_new = sun5i_timer_remove,
.driver = {
.name = "sun5i-timer",
.of_match_table = sun5i_timer_of_match,
.suppress_bind_attrs = true,
},
};
module_platform_driver(sun5i_timer_driver);
|
linux-master
|
drivers/clocksource/timer-sun5i.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Clocksource driver for Loongson-1 SoC
*
* Copyright (c) 2023 Keguang Zhang <[email protected]>
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sizes.h>
#include "timer-of.h"
/* Loongson-1 PWM Timer Register Definitions */
#define PWM_CNTR 0x0
#define PWM_HRC 0x4
#define PWM_LRC 0x8
#define PWM_CTRL 0xc
/* PWM Control Register Bits */
#define INT_LRC_EN BIT(11)
#define INT_HRC_EN BIT(10)
#define CNTR_RST BIT(7)
#define INT_SR BIT(6)
#define INT_EN BIT(5)
#define PWM_SINGLE BIT(4)
#define PWM_OE BIT(3)
#define CNT_EN BIT(0)
#define CNTR_WIDTH 24
static DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
struct ls1x_clocksource {
void __iomem *reg_base;
unsigned long ticks_per_jiffy;
struct clocksource clksrc;
};
static inline struct ls1x_clocksource *to_ls1x_clksrc(struct clocksource *c)
{
return container_of(c, struct ls1x_clocksource, clksrc);
}
static inline void ls1x_pwmtimer_set_period(unsigned int period,
struct timer_of *to)
{
writel(period, timer_of_base(to) + PWM_LRC);
writel(period, timer_of_base(to) + PWM_HRC);
}
static inline void ls1x_pwmtimer_clear(struct timer_of *to)
{
writel(0, timer_of_base(to) + PWM_CNTR);
}
static inline void ls1x_pwmtimer_start(struct timer_of *to)
{
writel((INT_EN | PWM_OE | CNT_EN), timer_of_base(to) + PWM_CTRL);
}
static inline void ls1x_pwmtimer_stop(struct timer_of *to)
{
writel(0, timer_of_base(to) + PWM_CTRL);
}
static inline void ls1x_pwmtimer_irq_ack(struct timer_of *to)
{
int val;
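	/* Acknowledge the interrupt by writing INT_SR back (treated as write-1-to-clear). */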
val = readl(timer_of_base(to) + PWM_CTRL);
val |= INT_SR;
writel(val, timer_of_base(to) + PWM_CTRL);
}
static irqreturn_t ls1x_clockevent_isr(int irq, void *dev_id)
{
struct clock_event_device *clkevt = dev_id;
struct timer_of *to = to_timer_of(clkevt);
ls1x_pwmtimer_irq_ack(to);
ls1x_pwmtimer_clear(to);
ls1x_pwmtimer_start(to);
clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
static int ls1x_clockevent_set_state_periodic(struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(timer_of_period(to), to);
ls1x_pwmtimer_clear(to);
ls1x_pwmtimer_start(to);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_tick_resume(struct clock_event_device *clkevt)
{
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_start(to_timer_of(clkevt));
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *clkevt)
{
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_stop(to_timer_of(clkevt));
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_set_next(unsigned long evt,
struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(evt, to);
ls1x_pwmtimer_clear(to);
ls1x_pwmtimer_start(to);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static struct timer_of ls1x_to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "ls1x-pwmtimer",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.rating = 300,
.set_next_event = ls1x_clockevent_set_next,
.set_state_periodic = ls1x_clockevent_set_state_periodic,
.set_state_oneshot = ls1x_clockevent_set_state_shutdown,
.set_state_shutdown = ls1x_clockevent_set_state_shutdown,
.tick_resume = ls1x_clockevent_tick_resume,
},
.of_irq = {
.handler = ls1x_clockevent_isr,
.flags = IRQF_TIMER,
},
};
/*
 * Since the PWM timer overflows every two ticks, it's not very useful
 * to read on its own. So use jiffies to emulate a free-running
 * counter:
 */
static u64 ls1x_clocksource_read(struct clocksource *cs)
{
struct ls1x_clocksource *ls1x_cs = to_ls1x_clksrc(cs);
unsigned long flags;
int count;
u32 jifs;
static int old_count;
static u32 old_jifs;
raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
* by having side effects on state that we cannot undo if
* there is a collision on the seqlock and our caller has to
* retry. (Namely, old_jifs and old_count.) So we must treat
* jiffies as volatile despite the lock. We read jiffies
* before latching the timer count to guarantee that although
* the jiffies value might be older than the count (that is,
* the counter may underflow between the last point where
* jiffies was incremented and the point where we latch the
* count), it cannot be newer.
*/
jifs = jiffies;
/* read the count */
count = readl(ls1x_cs->reg_base + PWM_CNTR);
/*
* It's possible for count to appear to go the wrong way for this
* reason:
*
* The timer counter underflows, but we haven't handled the resulting
* interrupt and incremented jiffies yet.
*
* Previous attempts to handle these cases intelligently were buggy, so
* we just do the simple thing now.
*/
if (count < old_count && jifs == old_jifs)
count = old_count;
old_count = count;
old_jifs = jifs;
raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
return (u64)(jifs * ls1x_cs->ticks_per_jiffy) + count;
}
static struct ls1x_clocksource ls1x_clocksource = {
.clksrc = {
.name = "ls1x-pwmtimer",
.rating = 300,
.read = ls1x_clocksource_read,
.mask = CLOCKSOURCE_MASK(CNTR_WIDTH),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
static int __init ls1x_pwm_clocksource_init(struct device_node *np)
{
struct timer_of *to = &ls1x_to;
int ret;
ret = timer_of_init(np, to);
if (ret)
return ret;
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
0x1, GENMASK(CNTR_WIDTH - 1, 0));
ls1x_clocksource.reg_base = timer_of_base(to);
ls1x_clocksource.ticks_per_jiffy = timer_of_period(to);
return clocksource_register_hz(&ls1x_clocksource.clksrc,
timer_of_rate(to));
}
TIMER_OF_DECLARE(ls1x_pwm_clocksource, "loongson,ls1b-pwmtimer",
ls1x_pwm_clocksource_init);
|
linux-master
|
drivers/clocksource/timer-loongson1-pwm.c
|
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2017 NXP
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include "timer-of.h"
#define TPM_PARAM 0x4
#define TPM_PARAM_WIDTH_SHIFT 16
#define TPM_PARAM_WIDTH_MASK (0xff << 16)
#define TPM_SC 0x10
#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
#define TPM_SC_CMOD_DIV_DEFAULT 0x3
#define TPM_SC_CMOD_DIV_MAX 0x7
#define TPM_SC_TOF_MASK (0x1 << 7)
#define TPM_CNT 0x14
#define TPM_MOD 0x18
#define TPM_STATUS 0x1c
#define TPM_STATUS_CH0F BIT(0)
#define TPM_C0SC 0x20
#define TPM_C0SC_CHIE BIT(6)
#define TPM_C0SC_MODE_SHIFT 2
#define TPM_C0SC_MODE_MASK 0x3c
#define TPM_C0SC_MODE_SW_COMPARE 0x4
#define TPM_C0SC_CHF_MASK (0x1 << 7)
#define TPM_C0V 0x24
static int counter_width __ro_after_init;
static void __iomem *timer_base __ro_after_init;
static inline void tpm_timer_disable(void)
{
unsigned int val;
/* channel disable */
val = readl(timer_base + TPM_C0SC);
val &= ~(TPM_C0SC_MODE_MASK | TPM_C0SC_CHIE);
writel(val, timer_base + TPM_C0SC);
}
static inline void tpm_timer_enable(void)
{
unsigned int val;
/* channel enabled in sw compare mode */
val = readl(timer_base + TPM_C0SC);
val |= (TPM_C0SC_MODE_SW_COMPARE << TPM_C0SC_MODE_SHIFT) |
TPM_C0SC_CHIE;
writel(val, timer_base + TPM_C0SC);
}
static inline void tpm_irq_acknowledge(void)
{
writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
}
static inline unsigned long tpm_read_counter(void)
{
return readl(timer_base + TPM_CNT);
}
#if defined(CONFIG_ARM)
static struct delay_timer tpm_delay_timer;
static unsigned long tpm_read_current_timer(void)
{
return tpm_read_counter();
}
static u64 notrace tpm_read_sched_clock(void)
{
return tpm_read_counter();
}
#endif
static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long next, now;
next = tpm_read_counter();
next += delta;
writel(next, timer_base + TPM_C0V);
now = tpm_read_counter();
	/*
	 * NOTE: We have observed, with very low probability, that bus fabric
	 * contention between the GPU and the A7 can delay the write to the
	 * CNT register by a few cycles, which may cause a min_delta event to
	 * be missed, so add an -ETIME check here for that case.
	 */
return (int)(next - now) <= 0 ? -ETIME : 0;
}
static int tpm_set_state_oneshot(struct clock_event_device *evt)
{
tpm_timer_enable();
return 0;
}
static int tpm_set_state_shutdown(struct clock_event_device *evt)
{
tpm_timer_disable();
return 0;
}
static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
tpm_irq_acknowledge();
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct timer_of to_tpm = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "i.MX TPM Timer",
.rating = 200,
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = tpm_set_state_shutdown,
.set_state_oneshot = tpm_set_state_oneshot,
.set_next_event = tpm_set_next_event,
.cpumask = cpu_possible_mask,
},
.of_irq = {
.handler = tpm_timer_interrupt,
.flags = IRQF_TIMER,
},
.of_clk = {
.name = "per",
},
};
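/*
 * The counter runs off the "per" clock divided by 8 for the usual 32-bit
 * counter (TPM_SC_CMOD_DIV_DEFAULT programmed in tpm_timer_init()), hence
 * the ">> 3" applied to the timer-of rate below.
 */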
static int __init tpm_clocksource_init(void)
{
#if defined(CONFIG_ARM)
tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3;
register_current_timer_delay(&tpm_delay_timer);
sched_clock_register(tpm_read_sched_clock, counter_width,
timer_of_rate(&to_tpm) >> 3);
#endif
return clocksource_mmio_init(timer_base + TPM_CNT,
"imx-tpm",
timer_of_rate(&to_tpm) >> 3,
to_tpm.clkevt.rating,
counter_width,
clocksource_mmio_readl_up);
}
static void __init tpm_clockevent_init(void)
{
clockevents_config_and_register(&to_tpm.clkevt,
timer_of_rate(&to_tpm) >> 3,
300,
GENMASK(counter_width - 1,
1));
}
static int __init tpm_timer_init(struct device_node *np)
{
struct clk *ipg;
int ret;
ipg = of_clk_get_by_name(np, "ipg");
if (IS_ERR(ipg)) {
pr_err("tpm: failed to get ipg clk\n");
return -ENODEV;
}
/* enable clk before accessing registers */
ret = clk_prepare_enable(ipg);
if (ret) {
pr_err("tpm: ipg clock enable failed (%d)\n", ret);
clk_put(ipg);
return ret;
}
ret = timer_of_init(np, &to_tpm);
if (ret)
return ret;
timer_base = timer_of_base(&to_tpm);
counter_width = (readl(timer_base + TPM_PARAM)
& TPM_PARAM_WIDTH_MASK) >> TPM_PARAM_WIDTH_SHIFT;
/* use rating 200 for 32-bit counter and 150 for 16-bit counter */
to_tpm.clkevt.rating = counter_width == 0x20 ? 200 : 150;
/*
* Initialize tpm module to a known state
* 1) Counter disabled
* 2) TPM counter operates in up counting mode
* 3) Timer Overflow Interrupt disabled
* 4) Channel0 disabled
* 5) DMA transfers disabled
*/
/* make sure counter is disabled */
writel(0, timer_base + TPM_SC);
/* TOF is W1C */
writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
writel(0, timer_base + TPM_CNT);
/* CHF is W1C */
writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
	/*
	 * Increment the counter once per input clock; divide by 8 for the
	 * 32-bit counter and by 128 for the 16-bit counter.
	 */
writel(TPM_SC_CMOD_INC_PER_CNT |
(counter_width == 0x20 ?
TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
timer_base + TPM_SC);
/* set MOD register to maximum for free running mode */
writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
tpm_clockevent_init();
return tpm_clocksource_init();
}
TIMER_OF_DECLARE(imx7ulp, "fsl,imx7ulp-tpm", tpm_timer_init);
|
linux-master
|
drivers/clocksource/timer-imx-tpm.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - CMT
*
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif
struct sh_cmt_device;
/*
* The CMT comes in 5 different identified flavours, depending not only on the
* SoC but also on the particular instance. The following table lists the main
* characteristics of those flavours.
*
 *                          16B    32B   32B-F    48B  R-Car Gen2
 * -----------------------------------------------------------------------------
 * Channels                   2      1/4        1         6           2/8
 * Control Width             16       16       16         16          32
 * Counter Width             16       32       32      32/48       32/48
 * Shared Start/Stop           Y       Y        Y          Y           N
*
* The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
* located in the channel registers block. All other versions have a shared
* start/stop register located in the global space.
*
* Channels are indexed from 0 to N-1 in the documentation. The channel index
* infers the start/stop bit position in the control register and the channel
* registers block address. Some CMT instances have a subset of channels
* available, in which case the index in the documentation doesn't match the
* "real" index as implemented in hardware. This is for instance the case with
* CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
* in the documentation but using start/stop bit 5 and having its registers
* block at 0x60.
*
* Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
* channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
*/
enum sh_cmt_model {
SH_CMT_16BIT,
SH_CMT_32BIT,
SH_CMT_48BIT,
SH_CMT0_RCAR_GEN2,
SH_CMT1_RCAR_GEN2,
};
struct sh_cmt_info {
enum sh_cmt_model model;
unsigned int channels_mask;
unsigned long width; /* 16 or 32 bit version of hardware block */
u32 overflow_bit;
u32 clear_bits;
/* callbacks for CMSTR and CMCSR access */
u32 (*read_control)(void __iomem *base, unsigned long offs);
void (*write_control)(void __iomem *base, unsigned long offs,
u32 value);
/* callbacks for CMCNT and CMCOR access */
u32 (*read_count)(void __iomem *base, unsigned long offs);
void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
struct sh_cmt_channel {
struct sh_cmt_device *cmt;
unsigned int index; /* Index in the documentation */
unsigned int hwidx; /* Real hardware index */
void __iomem *iostart;
void __iomem *ioctrl;
unsigned int timer_bit;
unsigned long flags;
u32 match_value;
u32 next_match_value;
u32 max_match_value;
raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
u64 total_cycles;
bool cs_enabled;
};
struct sh_cmt_device {
struct platform_device *pdev;
const struct sh_cmt_info *info;
void __iomem *mapbase;
struct clk *clk;
unsigned long rate;
unsigned int reg_delay;
raw_spinlock_t lock; /* Protect the shared start/stop register */
struct sh_cmt_channel *channels;
unsigned int num_channels;
unsigned int hw_channels;
bool has_clockevent;
bool has_clocksource;
};
#define SH_CMT16_CMCSR_CMF (1 << 7)
#define SH_CMT16_CMCSR_CMIE (1 << 6)
#define SH_CMT16_CMCSR_CKS8 (0 << 0)
#define SH_CMT16_CMCSR_CKS32 (1 << 0)
#define SH_CMT16_CMCSR_CKS128 (2 << 0)
#define SH_CMT16_CMCSR_CKS512 (3 << 0)
#define SH_CMT16_CMCSR_CKS_MASK (3 << 0)
#define SH_CMT32_CMCSR_CMF (1 << 15)
#define SH_CMT32_CMCSR_OVF (1 << 14)
#define SH_CMT32_CMCSR_WRFLG (1 << 13)
#define SH_CMT32_CMCSR_STTF (1 << 12)
#define SH_CMT32_CMCSR_STPF (1 << 11)
#define SH_CMT32_CMCSR_SSIE (1 << 10)
#define SH_CMT32_CMCSR_CMS (1 << 9)
#define SH_CMT32_CMCSR_CMM (1 << 8)
#define SH_CMT32_CMCSR_CMTOUT_IE (1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE (0 << 4)
#define SH_CMT32_CMCSR_CMR_DMA (1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ (2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK (3 << 4)
#define SH_CMT32_CMCSR_DBGIVD (1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8 (4 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK32 (5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128 (6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
return ioread16(base + (offs << 1));
}
static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
return ioread32(base + (offs << 2));
}
static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
iowrite16(value, base + (offs << 1));
}
static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
iowrite32(value, base + (offs << 2));
}
static const struct sh_cmt_info sh_cmt_info[] = {
[SH_CMT_16BIT] = {
.model = SH_CMT_16BIT,
.width = 16,
.overflow_bit = SH_CMT16_CMCSR_CMF,
.clear_bits = ~SH_CMT16_CMCSR_CMF,
.read_control = sh_cmt_read16,
.write_control = sh_cmt_write16,
.read_count = sh_cmt_read16,
.write_count = sh_cmt_write16,
},
[SH_CMT_32BIT] = {
.model = SH_CMT_32BIT,
.width = 32,
.overflow_bit = SH_CMT32_CMCSR_CMF,
.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
.read_control = sh_cmt_read16,
.write_control = sh_cmt_write16,
.read_count = sh_cmt_read32,
.write_count = sh_cmt_write32,
},
[SH_CMT_48BIT] = {
.model = SH_CMT_48BIT,
.channels_mask = 0x3f,
.width = 32,
.overflow_bit = SH_CMT32_CMCSR_CMF,
.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
.read_control = sh_cmt_read32,
.write_control = sh_cmt_write32,
.read_count = sh_cmt_read32,
.write_count = sh_cmt_write32,
},
[SH_CMT0_RCAR_GEN2] = {
.model = SH_CMT0_RCAR_GEN2,
.channels_mask = 0x60,
.width = 32,
.overflow_bit = SH_CMT32_CMCSR_CMF,
.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
.read_control = sh_cmt_read32,
.write_control = sh_cmt_write32,
.read_count = sh_cmt_read32,
.write_count = sh_cmt_write32,
},
[SH_CMT1_RCAR_GEN2] = {
.model = SH_CMT1_RCAR_GEN2,
.channels_mask = 0xff,
.width = 32,
.overflow_bit = SH_CMT32_CMCSR_CMF,
.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
.read_control = sh_cmt_read32,
.write_control = sh_cmt_write32,
.read_count = sh_cmt_read32,
.write_count = sh_cmt_write32,
},
};
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
#define CMCLKE 0x1000 /* CLK Enable Register (R-Car Gen2) */
static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
return ch->cmt->info->read_control(ch->iostart, 0);
else
return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}
static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
u32 old_value = sh_cmt_read_cmstr(ch);
if (value != old_value) {
if (ch->iostart) {
ch->cmt->info->write_control(ch->iostart, 0, value);
udelay(ch->cmt->reg_delay);
} else {
ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
udelay(ch->cmt->reg_delay);
}
}
}
static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}
static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
u32 old_value = sh_cmt_read_cmcsr(ch);
if (value != old_value) {
ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
udelay(ch->cmt->reg_delay);
}
}
static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}
static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
/* Tests showed that we need to wait 3 clocks here */
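	/* reg_delay covers 2 input clocks, so 3 clocks is 3/2 of it, rounded up. */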
unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2);
u32 reg;
if (ch->cmt->info->model > SH_CMT_16BIT) {
int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg,
!(reg & SH_CMT32_CMCSR_WRFLG),
1, cmcnt_delay, false, ch);
if (ret < 0)
return ret;
}
ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
udelay(cmcnt_delay);
return 0;
}
static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR);
if (value != old_value) {
ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
udelay(ch->cmt->reg_delay);
}
}
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
u32 v1, v2, v3;
u32 o1, o2;
o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
/* Make sure the timer value is stable. Stolen from acpi_pm.c */
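	/*
	 * Three back-to-back reads: v2 is only accepted once the values are
	 * consistently ordered and the overflow flag did not change while
	 * sampling, so a value caught mid-update gets re-read.
	 */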
do {
o2 = o1;
v1 = sh_cmt_read_cmcnt(ch);
v2 = sh_cmt_read_cmcnt(ch);
v3 = sh_cmt_read_cmcnt(ch);
o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
|| (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
*has_wrapped = o1;
return v2;
}
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
unsigned long flags;
u32 value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->cmt->lock, flags);
value = sh_cmt_read_cmstr(ch);
if (start)
value |= 1 << ch->timer_bit;
else
value &= ~(1 << ch->timer_bit);
sh_cmt_write_cmstr(ch, value);
raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
}
static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
int ret;
dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
/* enable clock */
ret = clk_enable(ch->cmt->clk);
if (ret) {
dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
ch->index);
goto err0;
}
/* make sure channel is disabled */
sh_cmt_start_stop_ch(ch, 0);
/* configure channel, periodic mode and maximum timeout */
if (ch->cmt->info->width == 16) {
sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
SH_CMT16_CMCSR_CKS512);
} else {
u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
SH_CMT32_CMCSR_CMTOUT_IE : 0;
sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
SH_CMT32_CMCSR_CMR_IRQ |
SH_CMT32_CMCSR_CKS_RCLK8);
}
sh_cmt_write_cmcor(ch, 0xffffffff);
ret = sh_cmt_write_cmcnt(ch, 0);
if (ret || sh_cmt_read_cmcnt(ch)) {
dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
ch->index);
ret = -ETIMEDOUT;
goto err1;
}
/* enable channel */
sh_cmt_start_stop_ch(ch, 1);
return 0;
err1:
/* stop clock */
clk_disable(ch->cmt->clk);
err0:
return ret;
}
static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
/* disable channel */
sh_cmt_start_stop_ch(ch, 0);
/* disable interrupts in CMT block */
sh_cmt_write_cmcsr(ch, 0);
/* stop clock */
clk_disable(ch->cmt->clk);
dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
}
/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
int absolute)
{
u32 value = ch->next_match_value;
u32 new_match;
u32 delay = 0;
u32 now = 0;
u32 has_wrapped;
now = sh_cmt_get_counter(ch, &has_wrapped);
ch->flags |= FLAG_REPROGRAM; /* force reprogram */
if (has_wrapped) {
/* we're competing with the interrupt handler.
* -> let the interrupt handler reprogram the timer.
* -> interrupt number two handles the event.
*/
ch->flags |= FLAG_SKIPEVENT;
return;
}
if (absolute)
now = 0;
do {
/* reprogram the timer hardware,
* but don't save the new match value yet.
*/
new_match = now + value + delay;
if (new_match > ch->max_match_value)
new_match = ch->max_match_value;
sh_cmt_write_cmcor(ch, new_match);
now = sh_cmt_get_counter(ch, &has_wrapped);
if (has_wrapped && (new_match > ch->match_value)) {
/* we are changing to a greater match value,
* so this wrap must be caused by the counter
* matching the old value.
* -> first interrupt reprograms the timer.
* -> interrupt number two handles the event.
*/
ch->flags |= FLAG_SKIPEVENT;
break;
}
if (has_wrapped) {
/* we are changing to a smaller match value,
* so the wrap must be caused by the counter
* matching the new value.
* -> save programmed match value.
* -> let isr handle the event.
*/
ch->match_value = new_match;
break;
}
/* be safe: verify hardware settings */
if (now < new_match) {
/* timer value is below match value, all good.
* this makes sure we won't miss any match events.
* -> save programmed match value.
* -> let isr handle the event.
*/
ch->match_value = new_match;
break;
}
		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed an event that is too close.
		 * -> increase delay and retry.
		 */
if (delay)
delay <<= 1;
else
delay = 1;
if (!delay)
dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
ch->index);
} while (delay);
}
static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
if (delta > ch->max_match_value)
dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
ch->index);
ch->next_match_value = delta;
sh_cmt_clock_event_program_verify(ch, 0);
}
static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
unsigned long flags;
raw_spin_lock_irqsave(&ch->lock, flags);
__sh_cmt_set_next(ch, delta);
raw_spin_unlock_irqrestore(&ch->lock, flags);
}
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
struct sh_cmt_channel *ch = dev_id;
/* clear flags */
sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
ch->cmt->info->clear_bits);
	/* update the clocksource counter first, if enabled.
	 * the wrap flag should already have been cleared by the
	 * timer-specific isr before we end up here.
	 */
if (ch->flags & FLAG_CLOCKSOURCE)
ch->total_cycles += ch->match_value + 1;
if (!(ch->flags & FLAG_REPROGRAM))
ch->next_match_value = ch->max_match_value;
ch->flags |= FLAG_IRQCONTEXT;
if (ch->flags & FLAG_CLOCKEVENT) {
if (!(ch->flags & FLAG_SKIPEVENT)) {
if (clockevent_state_oneshot(&ch->ced)) {
ch->next_match_value = ch->max_match_value;
ch->flags |= FLAG_REPROGRAM;
}
ch->ced.event_handler(&ch->ced);
}
}
ch->flags &= ~FLAG_SKIPEVENT;
if (ch->flags & FLAG_REPROGRAM) {
ch->flags &= ~FLAG_REPROGRAM;
sh_cmt_clock_event_program_verify(ch, 1);
if (ch->flags & FLAG_CLOCKEVENT)
if ((clockevent_state_shutdown(&ch->ced))
|| (ch->match_value == ch->next_match_value))
ch->flags &= ~FLAG_REPROGRAM;
}
ch->flags &= ~FLAG_IRQCONTEXT;
return IRQ_HANDLED;
}
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
int ret = 0;
unsigned long flags;
if (flag & FLAG_CLOCKSOURCE)
pm_runtime_get_sync(&ch->cmt->pdev->dev);
raw_spin_lock_irqsave(&ch->lock, flags);
if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
if (flag & FLAG_CLOCKEVENT)
pm_runtime_get_sync(&ch->cmt->pdev->dev);
ret = sh_cmt_enable(ch);
}
if (ret)
goto out;
ch->flags |= flag;
/* setup timeout if no clockevent */
if (ch->cmt->num_channels == 1 &&
flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(ch, ch->max_match_value);
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
unsigned long flags;
unsigned long f;
raw_spin_lock_irqsave(&ch->lock, flags);
f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
ch->flags &= ~flag;
if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
sh_cmt_disable(ch);
if (flag & FLAG_CLOCKEVENT)
pm_runtime_put(&ch->cmt->pdev->dev);
}
/* adjust the timeout to maximum if only clocksource left */
if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
__sh_cmt_set_next(ch, ch->max_match_value);
raw_spin_unlock_irqrestore(&ch->lock, flags);
if (flag & FLAG_CLOCKSOURCE)
pm_runtime_put(&ch->cmt->pdev->dev);
}
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
return container_of(cs, struct sh_cmt_channel, cs);
}
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
u32 has_wrapped;
if (ch->cmt->num_channels == 1) {
unsigned long flags;
u64 value;
u32 raw;
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
raw = sh_cmt_get_counter(ch, &has_wrapped);
if (unlikely(has_wrapped))
raw += ch->match_value + 1;
raw_spin_unlock_irqrestore(&ch->lock, flags);
return value + raw;
}
return sh_cmt_get_counter(ch, &has_wrapped);
}
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
int ret;
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
WARN_ON(ch->cs_enabled);
ch->total_cycles = 0;
ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
if (!ret)
ch->cs_enabled = true;
return ret;
}
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
WARN_ON(!ch->cs_enabled);
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
ch->cs_enabled = false;
}
static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
if (!ch->cs_enabled)
return;
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
if (!ch->cs_enabled)
return;
dev_pm_genpd_resume(&ch->cmt->pdev->dev);
sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
const char *name)
{
struct clocksource *cs = &ch->cs;
cs->name = name;
cs->rating = 125;
cs->read = sh_cmt_clocksource_read;
cs->enable = sh_cmt_clocksource_enable;
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_suspend;
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
ch->index);
clocksource_register_hz(cs, ch->cmt->rate);
return 0;
}
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
return container_of(ced, struct sh_cmt_channel, ced);
}
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
sh_cmt_start(ch, FLAG_CLOCKEVENT);
if (periodic)
sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
else
sh_cmt_set_next(ch, ch->max_match_value);
}
static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
sh_cmt_stop(ch, FLAG_CLOCKEVENT);
return 0;
}
static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
int periodic)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
/* deal with old setting first */
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
sh_cmt_stop(ch, FLAG_CLOCKEVENT);
dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
ch->index, periodic ? "periodic" : "oneshot");
sh_cmt_clock_event_start(ch, periodic);
return 0;
}
static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
return sh_cmt_clock_event_set_state(ced, 0);
}
static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
return sh_cmt_clock_event_set_state(ced, 1);
}
static int sh_cmt_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
BUG_ON(!clockevent_state_oneshot(ced));
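	/*
	 * When called from within sh_cmt_interrupt() (FLAG_IRQCONTEXT set),
	 * only record the new match value; the interrupt handler reprograms
	 * the hardware after the event handler returns. Otherwise program
	 * the hardware right away.
	 */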
if (likely(ch->flags & FLAG_IRQCONTEXT))
ch->next_match_value = delta - 1;
else
sh_cmt_set_next(ch, delta - 1);
return 0;
}
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
clk_unprepare(ch->cmt->clk);
}
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
clk_prepare(ch->cmt->clk);
dev_pm_genpd_resume(&ch->cmt->pdev->dev);
}
static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
const char *name)
{
struct clock_event_device *ced = &ch->ced;
int irq;
int ret;
irq = platform_get_irq(ch->cmt->pdev, ch->index);
if (irq < 0)
return irq;
ret = request_irq(irq, sh_cmt_interrupt,
IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
dev_name(&ch->cmt->pdev->dev), ch);
if (ret) {
dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
ch->index, irq);
return ret;
}
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 125;
ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_cmt_clock_event_next;
ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
ced->suspend = sh_cmt_clock_event_suspend;
ced->resume = sh_cmt_clock_event_resume;
/* TODO: calculate good shift from rate and counter bit width */
ced->shift = 32;
ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
ced->max_delta_ticks = ch->max_match_value;
ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
ced->min_delta_ticks = 0x1f;
dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
ch->index);
clockevents_register_device(ced);
return 0;
}
static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
bool clockevent, bool clocksource)
{
int ret;
if (clockevent) {
ch->cmt->has_clockevent = true;
ret = sh_cmt_register_clockevent(ch, name);
if (ret < 0)
return ret;
}
if (clocksource) {
ch->cmt->has_clocksource = true;
sh_cmt_register_clocksource(ch, name);
}
return 0;
}
static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
unsigned int hwidx, bool clockevent,
bool clocksource, struct sh_cmt_device *cmt)
{
u32 value;
int ret;
/* Skip unused channels. */
if (!clockevent && !clocksource)
return 0;
ch->cmt = cmt;
ch->index = index;
ch->hwidx = hwidx;
ch->timer_bit = hwidx;
/*
* Compute the address of the channel control register block. For the
* timers with a per-channel start/stop register, compute its address
* as well.
*/
switch (cmt->info->model) {
case SH_CMT_16BIT:
ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
break;
case SH_CMT_32BIT:
case SH_CMT_48BIT:
ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
break;
case SH_CMT0_RCAR_GEN2:
case SH_CMT1_RCAR_GEN2:
ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
ch->ioctrl = ch->iostart + 0x10;
ch->timer_bit = 0;
/* Enable the clock supply to the channel */
value = ioread32(cmt->mapbase + CMCLKE);
value |= BIT(hwidx);
iowrite32(value, cmt->mapbase + CMCLKE);
break;
}
if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
ch->max_match_value = ~0;
else
ch->max_match_value = (1 << cmt->info->width) - 1;
ch->match_value = ch->max_match_value;
raw_spin_lock_init(&ch->lock);
ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
clockevent, clocksource);
if (ret) {
dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
ch->index);
return ret;
}
ch->cs_enabled = false;
return 0;
}
static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
{
struct resource *mem;
mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
}
cmt->mapbase = ioremap(mem->start, resource_size(mem));
if (cmt->mapbase == NULL) {
dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
return -ENXIO;
}
return 0;
}
static const struct platform_device_id sh_cmt_id_table[] = {
{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
{
/* deprecated, preserved for backward compatibility */
.compatible = "renesas,cmt-48",
.data = &sh_cmt_info[SH_CMT_48BIT]
},
{
/* deprecated, preserved for backward compatibility */
.compatible = "renesas,cmt-48-gen2",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
{
.compatible = "renesas,r8a7740-cmt1",
.data = &sh_cmt_info[SH_CMT_48BIT]
},
{
.compatible = "renesas,sh73a0-cmt1",
.data = &sh_cmt_info[SH_CMT_48BIT]
},
{
.compatible = "renesas,rcar-gen2-cmt0",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
{
.compatible = "renesas,rcar-gen2-cmt1",
.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
},
{
.compatible = "renesas,rcar-gen3-cmt0",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
{
.compatible = "renesas,rcar-gen3-cmt1",
.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
},
{
.compatible = "renesas,rcar-gen4-cmt0",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
{
.compatible = "renesas,rcar-gen4-cmt1",
.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
},
{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
unsigned int mask, i;
unsigned long rate;
int ret;
cmt->pdev = pdev;
raw_spin_lock_init(&cmt->lock);
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
cmt->info = of_device_get_match_data(&pdev->dev);
cmt->hw_channels = cmt->info->channels_mask;
} else if (pdev->dev.platform_data) {
struct sh_timer_config *cfg = pdev->dev.platform_data;
const struct platform_device_id *id = pdev->id_entry;
cmt->info = (const struct sh_cmt_info *)id->driver_data;
cmt->hw_channels = cfg->channels_mask;
} else {
dev_err(&cmt->pdev->dev, "missing platform data\n");
return -ENXIO;
}
/* Get hold of clock. */
cmt->clk = clk_get(&cmt->pdev->dev, "fck");
if (IS_ERR(cmt->clk)) {
dev_err(&cmt->pdev->dev, "cannot get clock\n");
return PTR_ERR(cmt->clk);
}
ret = clk_prepare(cmt->clk);
if (ret < 0)
goto err_clk_put;
/* Determine clock rate. */
ret = clk_enable(cmt->clk);
if (ret < 0)
goto err_clk_unprepare;
rate = clk_get_rate(cmt->clk);
if (!rate) {
ret = -EINVAL;
goto err_clk_disable;
}
/* We shall wait 2 input clks after register writes */
if (cmt->info->model >= SH_CMT_48BIT)
cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate);
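	/* Matches the prescaler programmed in sh_cmt_enable(): CKS512 for 16-bit channels, RCLK/8 otherwise. */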
cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8);
/* Map the memory resource(s). */
ret = sh_cmt_map_memory(cmt);
if (ret < 0)
goto err_clk_disable;
/* Allocate and setup the channels. */
cmt->num_channels = hweight8(cmt->hw_channels);
cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
GFP_KERNEL);
if (cmt->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;
}
/*
* Use the first channel as a clock event device and the second channel
* as a clock source. If only one channel is available use it for both.
*/
for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
unsigned int hwidx = ffs(mask) - 1;
bool clocksource = i == 1 || cmt->num_channels == 1;
bool clockevent = i == 0;
ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
clockevent, clocksource, cmt);
if (ret < 0)
goto err_unmap;
mask &= ~(1 << hwidx);
}
clk_disable(cmt->clk);
platform_set_drvdata(pdev, cmt);
return 0;
err_unmap:
kfree(cmt->channels);
iounmap(cmt->mapbase);
err_clk_disable:
clk_disable(cmt->clk);
err_clk_unprepare:
clk_unprepare(cmt->clk);
err_clk_put:
clk_put(cmt->clk);
return ret;
}
static int sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
int ret;
if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
if (cmt) {
dev_info(&pdev->dev, "kept as earlytimer\n");
goto out;
}
cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
if (cmt == NULL)
return -ENOMEM;
ret = sh_cmt_setup(cmt, pdev);
if (ret) {
kfree(cmt);
pm_runtime_idle(&pdev->dev);
return ret;
}
if (is_sh_early_platform_device(pdev))
return 0;
out:
if (cmt->has_clockevent || cmt->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
else
pm_runtime_idle(&pdev->dev);
return 0;
}
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
.driver = {
.name = "sh_cmt",
.of_match_table = of_match_ptr(sh_cmt_of_table),
.suppress_bind_attrs = true,
},
.id_table = sh_cmt_id_table,
};
static int __init sh_cmt_init(void)
{
return platform_driver_register(&sh_cmt_device_driver);
}
static void __exit sh_cmt_exit(void)
{
platform_driver_unregister(&sh_cmt_device_driver);
}
#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
#endif
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/clocksource/sh_cmt.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* RDA8810PL SoC timer driver
*
* Copyright RDA Microelectronics Company Limited
* Copyright (c) 2017 Andreas Färber
* Copyright (c) 2018 Manivannan Sadhasivam
*
* RDA8810PL has two independent timers: OSTIMER (56 bit) and HWTIMER (64 bit).
* Each timer provides optional interrupt support. In this driver, OSTIMER is
* used for clockevents and HWTIMER is used for clocksource.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include "timer-of.h"
#define RDA_OSTIMER_LOADVAL_L 0x000
#define RDA_OSTIMER_CTRL 0x004
#define RDA_HWTIMER_LOCKVAL_L 0x024
#define RDA_HWTIMER_LOCKVAL_H 0x028
#define RDA_TIMER_IRQ_MASK_SET 0x02c
#define RDA_TIMER_IRQ_MASK_CLR 0x030
#define RDA_TIMER_IRQ_CLR 0x034
#define RDA_OSTIMER_CTRL_ENABLE BIT(24)
#define RDA_OSTIMER_CTRL_REPEAT BIT(28)
#define RDA_OSTIMER_CTRL_LOAD BIT(30)
#define RDA_TIMER_IRQ_MASK_OSTIMER BIT(0)
#define RDA_TIMER_IRQ_CLR_OSTIMER BIT(0)
static int rda_ostimer_start(void __iomem *base, bool periodic, u64 cycles)
{
u32 ctrl, load_l;
load_l = (u32)cycles;
ctrl = ((cycles >> 32) & 0xffffff);
ctrl |= RDA_OSTIMER_CTRL_LOAD | RDA_OSTIMER_CTRL_ENABLE;
if (periodic)
ctrl |= RDA_OSTIMER_CTRL_REPEAT;
/* Enable ostimer interrupt first */
writel_relaxed(RDA_TIMER_IRQ_MASK_OSTIMER,
base + RDA_TIMER_IRQ_MASK_SET);
/* Write low 32 bits first, high 24 bits are with ctrl */
writel_relaxed(load_l, base + RDA_OSTIMER_LOADVAL_L);
writel_relaxed(ctrl, base + RDA_OSTIMER_CTRL);
return 0;
}
static int rda_ostimer_stop(void __iomem *base)
{
/* Disable ostimer interrupt first */
writel_relaxed(RDA_TIMER_IRQ_MASK_OSTIMER,
base + RDA_TIMER_IRQ_MASK_CLR);
writel_relaxed(0, base + RDA_OSTIMER_CTRL);
return 0;
}
static int rda_ostimer_set_state_shutdown(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
rda_ostimer_stop(timer_of_base(to));
return 0;
}
static int rda_ostimer_set_state_oneshot(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
rda_ostimer_stop(timer_of_base(to));
return 0;
}
static int rda_ostimer_set_state_periodic(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
unsigned long cycles_per_jiffy;
rda_ostimer_stop(timer_of_base(to));
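	/*
	 * Convert one jiffy (NSEC_PER_SEC / HZ nanoseconds) into timer
	 * cycles using the clockevent's mult/shift scaling factors.
	 */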
cycles_per_jiffy = ((unsigned long long)NSEC_PER_SEC / HZ *
evt->mult) >> evt->shift;
rda_ostimer_start(timer_of_base(to), true, cycles_per_jiffy);
return 0;
}
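/*
* Worked example (illustrative only, assuming the 2000000 Hz rate used by
* rda_timer_init() and HZ = 100): the expression above converts one jiffy
* (NSEC_PER_SEC / HZ = 10000000 ns) into timer cycles via the clockevent's
* ns-to-cycles factors (cycles ~= ns * mult >> shift), which works out to
* roughly 2000000 / 100 = 20000 cycles per jiffy.
*/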
static int rda_ostimer_tick_resume(struct clock_event_device *evt)
{
return 0;
}
static int rda_ostimer_set_next_event(unsigned long evt,
struct clock_event_device *ev)
{
struct timer_of *to = to_timer_of(ev);
rda_ostimer_start(timer_of_base(to), false, evt);
return 0;
}
static irqreturn_t rda_ostimer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
struct timer_of *to = to_timer_of(evt);
/* clear timer int */
writel_relaxed(RDA_TIMER_IRQ_CLR_OSTIMER,
timer_of_base(to) + RDA_TIMER_IRQ_CLR);
if (evt->event_handler)
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct timer_of rda_ostimer_of = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE,
.clkevt = {
.name = "rda-ostimer",
.rating = 250,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = rda_ostimer_set_state_shutdown,
.set_state_oneshot = rda_ostimer_set_state_oneshot,
.set_state_periodic = rda_ostimer_set_state_periodic,
.tick_resume = rda_ostimer_tick_resume,
.set_next_event = rda_ostimer_set_next_event,
},
.of_base = {
.name = "rda-timer",
.index = 0,
},
.of_irq = {
.name = "ostimer",
.handler = rda_ostimer_interrupt,
.flags = IRQF_TIMER,
},
};
static u64 rda_hwtimer_read(struct clocksource *cs)
{
void __iomem *base = timer_of_base(&rda_ostimer_of);
u32 lo, hi;
/* Always read low 32 bits first */
do {
lo = readl_relaxed(base + RDA_HWTIMER_LOCKVAL_L);
hi = readl_relaxed(base + RDA_HWTIMER_LOCKVAL_H);
} while (hi != readl_relaxed(base + RDA_HWTIMER_LOCKVAL_H));
return ((u64)hi << 32) | lo;
}
static struct clocksource rda_hwtimer_clocksource = {
.name = "rda-timer",
.rating = 400,
.read = rda_hwtimer_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init rda_timer_init(struct device_node *np)
{
unsigned long rate = 2000000;
int ret;
ret = timer_of_init(np, &rda_ostimer_of);
if (ret)
return ret;
clocksource_register_hz(&rda_hwtimer_clocksource, rate);
clockevents_config_and_register(&rda_ostimer_of.clkevt, rate,
0x2, UINT_MAX);
return 0;
}
TIMER_OF_DECLARE(rda8810pl, "rda,8810pl-timer", rda_timer_init);
|
linux-master
|
drivers/clocksource/timer-rda.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/mach-pxa/time.c
*
* PXA clocksource, clockevents, and OST interrupt handlers.
* Copyright (c) 2007 by Bill Gatliff <[email protected]>.
*
* Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
* by MontaVista Software, Inc. (Nico, your code rocks!)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <clocksource/pxa.h>
#include <asm/div64.h>
#define OSMR0 0x00 /* OS Timer 0 Match Register */
#define OSMR1 0x04 /* OS Timer 1 Match Register */
#define OSMR2 0x08 /* OS Timer 2 Match Register */
#define OSMR3 0x0C /* OS Timer 3 Match Register */
#define OSCR 0x10 /* OS Timer Counter Register */
#define OSSR 0x14 /* OS Timer Status Register */
#define OWER 0x18 /* OS Timer Watchdog Enable Register */
#define OIER 0x1C /* OS Timer Interrupt Enable Register */
#define OSSR_M3 (1 << 3) /* Match status channel 3 */
#define OSSR_M2 (1 << 2) /* Match status channel 2 */
#define OSSR_M1 (1 << 1) /* Match status channel 1 */
#define OSSR_M0 (1 << 0) /* Match status channel 0 */
#define OIER_E0 (1 << 0) /* Interrupt enable channel 0 */
/*
* This is PXA's sched_clock implementation. This has a resolution
* of at least 308 ns and a maximum value of 208 days.
*
* The return value is guaranteed to be monotonic in that range as
* long as there is always less than 582 seconds between successive
* calls to sched_clock() which should always be the case in practice.
*/
#define timer_readl(reg) readl_relaxed(timer_base + (reg))
#define timer_writel(val, reg) writel_relaxed((val), timer_base + (reg))
static void __iomem *timer_base;
static u64 notrace pxa_read_sched_clock(void)
{
return timer_readl(OSCR);
}
#define MIN_OSCR_DELTA 16
static irqreturn_t
pxa_ost0_interrupt(int irq, void *dev_id)
{
struct clock_event_device *c = dev_id;
/* Disarm the compare/match, signal the event. */
timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
timer_writel(OSSR_M0, OSSR);
c->event_handler(c);
return IRQ_HANDLED;
}
static int
pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
{
unsigned long next, oscr;
timer_writel(timer_readl(OIER) | OIER_E0, OIER);
next = timer_readl(OSCR) + delta;
timer_writel(next, OSMR0);
oscr = timer_readl(OSCR);
return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
}
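/*
* Worked example (illustrative only, not from the original driver): the
* signed comparison above catches the case where OSCR has already caught
* up with (or come within MIN_OSCR_DELTA of) the freshly programmed match
* value. E.g. if next = 1000 and OSCR reads 995 by the time it is sampled,
* the 5-tick margin is below MIN_OSCR_DELTA (16), so -ETIME is returned
* and the clockevents core retries with a larger delta.
*/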
static int pxa_osmr0_shutdown(struct clock_event_device *evt)
{
/* initializing, released, or preparing for suspend */
timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
timer_writel(OSSR_M0, OSSR);
return 0;
}
#ifdef CONFIG_PM
static unsigned long osmr[4], oier, oscr;
static void pxa_timer_suspend(struct clock_event_device *cedev)
{
osmr[0] = timer_readl(OSMR0);
osmr[1] = timer_readl(OSMR1);
osmr[2] = timer_readl(OSMR2);
osmr[3] = timer_readl(OSMR3);
oier = timer_readl(OIER);
oscr = timer_readl(OSCR);
}
static void pxa_timer_resume(struct clock_event_device *cedev)
{
/*
* Ensure that we have at least MIN_OSCR_DELTA between match
* register 0 and the OSCR, to guarantee that we will receive
* the one-shot timer interrupt. We adjust OSMR0 in preference
* to OSCR to guarantee that OSCR is monotonically incrementing.
*/
if (osmr[0] - oscr < MIN_OSCR_DELTA)
osmr[0] += MIN_OSCR_DELTA;
timer_writel(osmr[0], OSMR0);
timer_writel(osmr[1], OSMR1);
timer_writel(osmr[2], OSMR2);
timer_writel(osmr[3], OSMR3);
timer_writel(oier, OIER);
timer_writel(oscr, OSCR);
}
#else
#define pxa_timer_suspend NULL
#define pxa_timer_resume NULL
#endif
static struct clock_event_device ckevt_pxa_osmr0 = {
.name = "osmr0",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_next_event = pxa_osmr0_set_next_event,
.set_state_shutdown = pxa_osmr0_shutdown,
.set_state_oneshot = pxa_osmr0_shutdown,
.suspend = pxa_timer_suspend,
.resume = pxa_timer_resume,
};
static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
{
int ret;
timer_writel(0, OIER);
timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
ckevt_pxa_osmr0.cpumask = cpumask_of(0);
ret = request_irq(irq, pxa_ost0_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"ost0", &ckevt_pxa_osmr0);
if (ret) {
pr_err("Failed to setup irq\n");
return ret;
}
ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
32, clocksource_mmio_readl_up);
if (ret) {
pr_err("Failed to init clocksource\n");
return ret;
}
clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
MIN_OSCR_DELTA * 2, 0x7fffffff);
return 0;
}
static int __init pxa_timer_dt_init(struct device_node *np)
{
struct clk *clk;
int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_crit("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_crit("Failed to prepare clock\n");
return ret;
}
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_crit("%pOFn: unable to parse OS-timer0 irq\n", np);
return -EINVAL;
}
return pxa_timer_common_init(irq, clk_get_rate(clk));
}
TIMER_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
/*
* Legacy timer init for non device-tree boards.
*/
void __init pxa_timer_nodt_init(int irq, void __iomem *base)
{
struct clk *clk;
timer_base = base;
clk = clk_get(NULL, "OSTIMER0");
if (clk && !IS_ERR(clk)) {
clk_prepare_enable(clk);
pxa_timer_common_init(irq, clk_get_rate(clk));
} else {
pr_crit("%s: unable to get clk\n", __func__);
}
}
|
linux-master
|
drivers/clocksource/timer-pxa.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2012 Broadcom Corporation
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define KONA_GPTIMER_STCS_OFFSET 0x00000000
#define KONA_GPTIMER_STCLO_OFFSET 0x00000004
#define KONA_GPTIMER_STCHI_OFFSET 0x00000008
#define KONA_GPTIMER_STCM0_OFFSET 0x0000000C
#define KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT 0
#define KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT 4
struct kona_bcm_timers {
int tmr_irq;
void __iomem *tmr_regs;
};
static struct kona_bcm_timers timers;
static u32 arch_timer_rate;
/*
* We use the peripheral timers for the system tick and the CPU global
* timer for the profile tick.
*/
static void kona_timer_disable_and_clear(void __iomem *base)
{
uint32_t reg;
/*
* clear and disable interrupts
* We are using compare/match register 0 for our system interrupts
*/
reg = readl(base + KONA_GPTIMER_STCS_OFFSET);
/* Clear compare (0) interrupt */
reg |= 1 << KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT;
/* disable compare */
reg &= ~(1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
writel(reg, base + KONA_GPTIMER_STCS_OFFSET);
}
static int
kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
int loop_limit = 3;
/*
* Read 64-bit free running counter
* 1. Read hi-word
* 2. Read low-word
* 3. Read hi-word again
* 4.1
* if new hi-word is not equal to previously read hi-word, then
* start from #1
* 4.2
* if new hi-word is equal to previously read hi-word then stop.
*/
do {
*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
} while (--loop_limit);
if (!loop_limit) {
pr_err("bcm_kona_timer: getting counter failed.\n");
pr_err(" Timer will be impacted\n");
return -ETIMEDOUT;
}
return 0;
}
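/*
* Hypothetical helper (illustrative sketch, not part of the original
* driver): shows how the msw/lsw pair filled in above would be combined
* into a single 64-bit counter value, should a caller ever need one.
*/
static inline u64 kona_timer_counter_to_u64(uint32_t msw, uint32_t lsw)
{
return ((u64)msw << 32) | lsw;
}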
static int kona_timer_set_next_event(unsigned long clc,
struct clock_event_device *unused)
{
/*
* Timer (0) has already been disabled by the timer interrupt,
* so here we reload the next event value and re-enable the
* timer.
*
* This way we potentially lose the time between the timer
* interrupt and set_next_event. CPU-local timers, when they
* come in, should get rid of the skew.
*/
uint32_t lsw, msw;
uint32_t reg;
int ret;
ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
if (ret)
return ret;
/* Load the "next" event tick value */
writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
/* Enable compare */
reg = readl(timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
reg |= (1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
writel(reg, timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
return 0;
}
static int kona_timer_shutdown(struct clock_event_device *evt)
{
kona_timer_disable_and_clear(timers.tmr_regs);
return 0;
}
static struct clock_event_device kona_clockevent_timer = {
.name = "timer 1",
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = kona_timer_set_next_event,
.set_state_shutdown = kona_timer_shutdown,
.tick_resume = kona_timer_shutdown,
};
static void __init kona_timer_clockevents_init(void)
{
kona_clockevent_timer.cpumask = cpumask_of(0);
clockevents_config_and_register(&kona_clockevent_timer,
arch_timer_rate, 6, 0xffffffff);
}
static irqreturn_t kona_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &kona_clockevent_timer;
kona_timer_disable_and_clear(timers.tmr_regs);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init kona_timer_init(struct device_node *node)
{
u32 freq;
struct clk *external_clk;
external_clk = of_clk_get_by_name(node, NULL);
if (!IS_ERR(external_clk)) {
arch_timer_rate = clk_get_rate(external_clk);
clk_prepare_enable(external_clk);
} else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
arch_timer_rate = freq;
} else {
pr_err("Kona Timer v1 unable to determine clock-frequency\n");
return -EINVAL;
}
/* Setup IRQ numbers */
timers.tmr_irq = irq_of_parse_and_map(node, 0);
/* Setup IO addresses */
timers.tmr_regs = of_iomap(node, 0);
kona_timer_disable_and_clear(timers.tmr_regs);
kona_timer_clockevents_init();
if (request_irq(timers.tmr_irq, kona_timer_interrupt, IRQF_TIMER,
"Kona Timer Tick", NULL))
pr_err("%s: request_irq() failed\n", "Kona Timer Tick");
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
return 0;
}
TIMER_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
/*
* bcm,kona-timer is deprecated by brcm,kona-timer
* being kept here for driver compatibility
*/
TIMER_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init);
|
linux-master
|
drivers/clocksource/bcm_kona_timer.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* i8253 PIT clocksource
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
#include <linux/module.h>
#include <linux/i8253.h>
#include <linux/smp.h>
/*
* Protects access to I/O ports
*
* 0040-0043 : timer0, i8253 / i8254
* 0061-0061 : NMI Control Register which contains two speaker control bits.
*/
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
* Handle PIT quirk in pit_shutdown() where zeroing the counter register
* restarts the PIT, negating the shutdown. On platforms with the quirk,
* platform specific code can set this to false.
*/
bool i8253_clear_counter_on_shutdown __ro_after_init = true;
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, it's not very useful
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
static u64 i8253_read(struct clocksource *cs)
{
static int old_count;
static u32 old_jifs;
unsigned long flags;
int count;
u32 jifs;
raw_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of jiffies_lock,
* this is now a seqlock, and we are cheating in this routine
* by having side effects on state that we cannot undo if
* there is a collision on the seqlock and our caller has to
* retry. (Namely, old_jifs and old_count.) So we must treat
* jiffies as volatile despite the lock. We read jiffies
* before latching the timer count to guarantee that although
* the jiffies value might be older than the count (that is,
* the counter may underflow between the last point where
* jiffies was incremented and the point where we latch the
* count), it cannot be newer.
*/
jifs = jiffies;
outb_p(0x00, PIT_MODE); /* latch the count ASAP */
count = inb_p(PIT_CH0); /* read the latched count */
count |= inb_p(PIT_CH0) << 8;
/* VIA686a test code... reset the latch if count > max + 1 */
if (count > PIT_LATCH) {
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0);
outb_p(PIT_LATCH >> 8, PIT_CH0);
count = PIT_LATCH - 1;
}
/*
* It's possible for count to appear to go the wrong way for a
* couple of reasons:
*
* 1. The timer counter underflows, but we haven't handled the
* resulting interrupt and incremented jiffies yet.
* 2. Hardware problem with the timer, not giving us continuous time,
* the counter does small "jumps" upwards on some Pentium systems,
* (see c't 95/10 page 335 for Neptun bug.)
*
* Previous attempts to handle these cases intelligently were
* buggy, so we just do the simple thing now.
*/
if (count > old_count && jifs == old_jifs)
count = old_count;
old_count = count;
old_jifs = jifs;
raw_spin_unlock_irqrestore(&i8253_lock, flags);
count = (PIT_LATCH - 1) - count;
return (u64)(jifs * PIT_LATCH) + count;
}
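/*
* Illustrative note (not from the original driver): the value returned
* above advances by PIT_LATCH per jiffy plus the PIT ticks elapsed within
* the current jiffy, i.e. it approximates a free-running counter
* incrementing at PIT_TICK_RATE (~1.193182 MHz). The clocksource core then
* scales it to nanoseconds using the mult/shift factors derived from
* clocksource_register_hz(&i8253_cs, PIT_TICK_RATE) below.
*/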
static struct clocksource i8253_cs = {
.name = "pit",
.rating = 110,
.read = i8253_read,
.mask = CLOCKSOURCE_MASK(32),
};
int __init clocksource_i8253_init(void)
{
return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
}
#endif
#ifdef CONFIG_CLKEVT_I8253
static int pit_shutdown(struct clock_event_device *evt)
{
if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
return 0;
raw_spin_lock(&i8253_lock);
outb_p(0x30, PIT_MODE);
if (i8253_clear_counter_on_shutdown) {
outb_p(0, PIT_CH0);
outb_p(0, PIT_CH0);
}
raw_spin_unlock(&i8253_lock);
return 0;
}
static int pit_set_oneshot(struct clock_event_device *evt)
{
raw_spin_lock(&i8253_lock);
outb_p(0x38, PIT_MODE);
raw_spin_unlock(&i8253_lock);
return 0;
}
static int pit_set_periodic(struct clock_event_device *evt)
{
raw_spin_lock(&i8253_lock);
/* binary, mode 2, LSB/MSB, ch 0 */
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0); /* LSB */
outb_p(PIT_LATCH >> 8, PIT_CH0); /* MSB */
raw_spin_unlock(&i8253_lock);
return 0;
}
/*
* Program the next event in oneshot mode
*
* Delta is given in PIT ticks
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
raw_spin_lock(&i8253_lock);
outb_p(delta & 0xff , PIT_CH0); /* LSB */
outb_p(delta >> 8 , PIT_CH0); /* MSB */
raw_spin_unlock(&i8253_lock);
return 0;
}
/*
* On UP the PIT can serve all of the possible timer functions. On SMP systems
* it can be solely used for the global tick.
*/
struct clock_event_device i8253_clockevent = {
.name = "pit",
.features = CLOCK_EVT_FEAT_PERIODIC,
.set_state_shutdown = pit_shutdown,
.set_state_periodic = pit_set_periodic,
.set_next_event = pit_next_event,
};
/*
* Initialize the conversion factor and the min/max deltas of the clock event
* structure and register the clock event source with the framework.
*/
void __init clockevent_i8253_init(bool oneshot)
{
if (oneshot) {
i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
i8253_clockevent.set_state_oneshot = pit_set_oneshot;
}
/*
* Start pit with the boot cpu mask. x86 might make it global
* when it is used as broadcast device later.
*/
i8253_clockevent.cpumask = cpumask_of(smp_processor_id());
clockevents_config_and_register(&i8253_clockevent, PIT_TICK_RATE,
0xF, 0x7FFF);
}
#endif
|
linux-master
|
drivers/clocksource/i8253.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Maxime Coquelin 2015
* Author: Maxime Coquelin <[email protected]>
*
* Inspired by time-efm32.c from Uwe Kleine-Koenig
*/
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include "timer-of.h"
#define TIM_CR1 0x00
#define TIM_DIER 0x0c
#define TIM_SR 0x10
#define TIM_EGR 0x14
#define TIM_CNT 0x24
#define TIM_PSC 0x28
#define TIM_ARR 0x2c
#define TIM_CCR1 0x34
#define TIM_CR1_CEN BIT(0)
#define TIM_CR1_UDIS BIT(1)
#define TIM_CR1_OPM BIT(3)
#define TIM_CR1_ARPE BIT(7)
#define TIM_DIER_UIE BIT(0)
#define TIM_DIER_CC1IE BIT(1)
#define TIM_SR_UIF BIT(0)
#define TIM_EGR_UG BIT(0)
#define TIM_PSC_MAX USHRT_MAX
#define TIM_PSC_CLKRATE 10000
struct stm32_timer_private {
int bits;
};
/**
* stm32_timer_of_bits_set - set accessor helper
* @to: a timer_of structure pointer
* @bits: the number of bits (16 or 32)
*
* Accessor helper to set the number of bits in the timer-of private
* structure.
*
*/
static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
{
struct stm32_timer_private *pd = to->private_data;
pd->bits = bits;
}
/**
* stm32_timer_of_bits_get - get accessor helper
* @to: a timer_of structure pointer
*
* Accessor helper to get the number of bits in the timer-of private
* structure.
*
* Returns an integer corresponding to the number of bits.
*/
static int stm32_timer_of_bits_get(struct timer_of *to)
{
struct stm32_timer_private *pd = to->private_data;
return pd->bits;
}
static void __iomem *stm32_timer_cnt __read_mostly;
static u64 notrace stm32_read_sched_clock(void)
{
return readl_relaxed(stm32_timer_cnt);
}
static struct delay_timer stm32_timer_delay;
static unsigned long stm32_read_delay(void)
{
return readl_relaxed(stm32_timer_cnt);
}
static void stm32_clock_event_disable(struct timer_of *to)
{
writel_relaxed(0, timer_of_base(to) + TIM_DIER);
}
/**
* stm32_timer_start - Start the counter without event
* @to: a timer_of structure pointer
*
* Start the timer in order to have the counter reset and start
* incrementing but disable interrupt event when there is a counter
* overflow. By default, the counter direction is used as upcounter.
*/
static void stm32_timer_start(struct timer_of *to)
{
writel_relaxed(TIM_CR1_UDIS | TIM_CR1_CEN, timer_of_base(to) + TIM_CR1);
}
static int stm32_clock_event_shutdown(struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
stm32_clock_event_disable(to);
return 0;
}
static int stm32_clock_event_set_next_event(unsigned long evt,
struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
unsigned long now, next;
next = readl_relaxed(timer_of_base(to) + TIM_CNT) + evt;
writel_relaxed(next, timer_of_base(to) + TIM_CCR1);
now = readl_relaxed(timer_of_base(to) + TIM_CNT);
if ((next - now) > evt)
return -ETIME;
writel_relaxed(TIM_DIER_CC1IE, timer_of_base(to) + TIM_DIER);
return 0;
}
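/*
* Worked example (illustrative only, not from the original driver): the
* unsigned check above detects that the counter has already passed the
* programmed compare value. With evt = 50, if TIM_CNT was 100 when next
* was computed (next = 150) but reads 160 afterwards, next - now wraps to
* a huge value greater than evt, so -ETIME is returned before the
* interrupt is enabled and the clockevents core retries.
*/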
static int stm32_clock_event_set_periodic(struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
stm32_timer_start(to);
return stm32_clock_event_set_next_event(timer_of_period(to), clkevt);
}
static int stm32_clock_event_set_oneshot(struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
stm32_timer_start(to);
return 0;
}
static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
{
struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
struct timer_of *to = to_timer_of(clkevt);
writel_relaxed(0, timer_of_base(to) + TIM_SR);
if (clockevent_state_periodic(clkevt))
stm32_clock_event_set_periodic(clkevt);
else
stm32_clock_event_shutdown(clkevt);
clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
/**
* stm32_timer_set_width - Sort out the timer width (32/16)
* @to: a pointer to a timer-of structure
*
* Write the 32-bit max value to TIM_ARR and read it back. If the timer
* is 32 bits wide, the value read back will be UINT_MAX, otherwise it
* will have been truncated by the 16-bit register to USHRT_MAX.
*
*/
static void __init stm32_timer_set_width(struct timer_of *to)
{
u32 width;
writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
width = readl_relaxed(timer_of_base(to) + TIM_ARR);
stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
}
/**
* stm32_timer_set_prescaler - Compute and set the prescaler register
* @to: a pointer to a timer-of structure
*
* Depending on the timer width, compute the prescaler so that a
* 16-bit timer runs at roughly TIM_PSC_CLKRATE (10 kHz). 32-bit
* timers are considered precise and long enough not to need the
* prescaler.
*/
static void __init stm32_timer_set_prescaler(struct timer_of *to)
{
int prescaler = 1;
if (stm32_timer_of_bits_get(to) != 32) {
prescaler = DIV_ROUND_CLOSEST(timer_of_rate(to),
TIM_PSC_CLKRATE);
/*
* The prescaler register is a u16, so the value can't be
* greater than TIM_PSC_MAX; cap it in that case.
*/
prescaler = prescaler < TIM_PSC_MAX ? prescaler : TIM_PSC_MAX;
}
writel_relaxed(prescaler - 1, timer_of_base(to) + TIM_PSC);
writel_relaxed(TIM_EGR_UG, timer_of_base(to) + TIM_EGR);
writel_relaxed(0, timer_of_base(to) + TIM_SR);
/* Adjust rate and period given the prescaler value */
to->of_clk.rate = DIV_ROUND_CLOSEST(to->of_clk.rate, prescaler);
to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
}
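/*
* Worked example (illustrative only, assuming a hypothetical 90 MHz parent
* clock and HZ = 100): for a 16-bit timer the code above computes
* prescaler = DIV_ROUND_CLOSEST(90000000, 10000) = 9000, so the counter
* then ticks at 90000000 / 9000 = 10 kHz and of_clk.period becomes
* 10000 / 100 = 100 ticks per jiffy.
*/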
static int __init stm32_clocksource_init(struct timer_of *to)
{
u32 bits = stm32_timer_of_bits_get(to);
const char *name = to->np->full_name;
/*
* This driver allows registering several timers and relies on
* the generic time framework to select the right one.
* However, nothing allows doing the same for the
* sched_clock. We are not interested in a sched_clock for the
* 16-bit timers but only for the 32-bit one, so if no 32-bit
* timer has been registered yet, we select this 32-bit timer
* as the sched_clock.
*/
if (bits == 32 && !stm32_timer_cnt) {
/*
* Start the counter immediately, as we will be using
* it right away.
*/
stm32_timer_start(to);
stm32_timer_cnt = timer_of_base(to) + TIM_CNT;
sched_clock_register(stm32_read_sched_clock, bits, timer_of_rate(to));
pr_info("%s: STM32 sched_clock registered\n", name);
stm32_timer_delay.read_current_timer = stm32_read_delay;
stm32_timer_delay.freq = timer_of_rate(to);
register_current_timer_delay(&stm32_timer_delay);
pr_info("%s: STM32 delay timer registered\n", name);
}
return clocksource_mmio_init(timer_of_base(to) + TIM_CNT, name,
timer_of_rate(to), bits == 32 ? 250 : 100,
bits, clocksource_mmio_readl_up);
}
static void __init stm32_clockevent_init(struct timer_of *to)
{
u32 bits = stm32_timer_of_bits_get(to);
to->clkevt.name = to->np->full_name;
to->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
to->clkevt.set_state_shutdown = stm32_clock_event_shutdown;
to->clkevt.set_state_periodic = stm32_clock_event_set_periodic;
to->clkevt.set_state_oneshot = stm32_clock_event_set_oneshot;
to->clkevt.tick_resume = stm32_clock_event_shutdown;
to->clkevt.set_next_event = stm32_clock_event_set_next_event;
to->clkevt.rating = bits == 32 ? 250 : 100;
clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 0x1,
(1 << bits) - 1);
pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
to->np, bits);
}
static int __init stm32_timer_init(struct device_node *node)
{
struct reset_control *rstc;
struct timer_of *to;
int ret;
to = kzalloc(sizeof(*to), GFP_KERNEL);
if (!to)
return -ENOMEM;
to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
to->of_irq.handler = stm32_clock_event_handler;
ret = timer_of_init(node, to);
if (ret)
goto err;
to->private_data = kzalloc(sizeof(struct stm32_timer_private),
GFP_KERNEL);
if (!to->private_data) {
ret = -ENOMEM;
goto deinit;
}
rstc = of_reset_control_get(node, NULL);
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
reset_control_deassert(rstc);
}
stm32_timer_set_width(to);
stm32_timer_set_prescaler(to);
ret = stm32_clocksource_init(to);
if (ret)
goto deinit;
stm32_clockevent_init(to);
return 0;
deinit:
timer_of_cleanup(to);
err:
kfree(to);
return ret;
}
TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_timer_init);
|
linux-master
|
drivers/clocksource/timer-stm32.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MediaTek SoCs CPUX General Purpose Timer handling
*
* Based on timer-mediatek.c:
* Copyright (C) 2014 Matthias Brugger <[email protected]>
*
* Copyright (C) 2022 Collabora Ltd.
* AngeloGioacchino Del Regno <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include "timer-of.h"
#define TIMER_SYNC_TICKS 3
/* cpux mcusys wrapper */
#define CPUX_CON_REG 0x0
#define CPUX_IDX_REG 0x4
/* cpux */
#define CPUX_IDX_GLOBAL_CTRL 0x0
#define CPUX_ENABLE BIT(0)
#define CPUX_CLK_DIV_MASK GENMASK(10, 8)
#define CPUX_CLK_DIV1 BIT(8)
#define CPUX_CLK_DIV2 BIT(9)
#define CPUX_CLK_DIV4 BIT(10)
#define CPUX_IDX_GLOBAL_IRQ 0x30
static u32 mtk_cpux_readl(u32 reg_idx, struct timer_of *to)
{
writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
return readl(timer_of_base(to) + CPUX_CON_REG);
}
static void mtk_cpux_writel(u32 val, u32 reg_idx, struct timer_of *to)
{
writel(reg_idx, timer_of_base(to) + CPUX_IDX_REG);
writel(val, timer_of_base(to) + CPUX_CON_REG);
}
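/*
* Illustrative note (not from the original driver): CPUX registers are
* reached through a two-register window: the value written to
* CPUX_IDX_REG selects which internal register CPUX_CON_REG maps to.
* E.g. reading CPUX_IDX_GLOBAL_CTRL means writing 0x0 to CPUX_IDX_REG
* and then reading back CPUX_CON_REG.
*/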
static void mtk_cpux_set_irq(struct timer_of *to, bool enable)
{
const unsigned long *irq_mask = cpumask_bits(cpu_possible_mask);
u32 val;
val = mtk_cpux_readl(CPUX_IDX_GLOBAL_IRQ, to);
if (enable)
val |= *irq_mask;
else
val &= ~(*irq_mask);
mtk_cpux_writel(val, CPUX_IDX_GLOBAL_IRQ, to);
}
static int mtk_cpux_clkevt_shutdown(struct clock_event_device *clkevt)
{
/* Clear any irq */
mtk_cpux_set_irq(to_timer_of(clkevt), false);
/*
* Disabling CPUXGPT timer will crash the platform, especially
* if Trusted Firmware is using it (usually, for sleep states),
* so we only mask the IRQ and call it a day.
*/
return 0;
}
static int mtk_cpux_clkevt_resume(struct clock_event_device *clkevt)
{
mtk_cpux_set_irq(to_timer_of(clkevt), true);
return 0;
}
static struct timer_of to = {
/*
* There are per-cpu interrupts for the CPUX General Purpose Timer
* but since this timer feeds the AArch64 System Timer we can rely
* on the CPU timer PPIs as well, so we don't declare TIMER_OF_IRQ.
*/
.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "mtk-cpuxgpt",
.cpumask = cpu_possible_mask,
.rating = 10,
.set_state_shutdown = mtk_cpux_clkevt_shutdown,
.tick_resume = mtk_cpux_clkevt_resume,
},
};
static int __init mtk_cpux_init(struct device_node *node)
{
u32 freq, val;
int ret;
/* If this fails, bad things are about to happen... */
ret = timer_of_init(node, &to);
if (ret) {
WARN(1, "Cannot start CPUX timers.\n");
return ret;
}
/*
* Check if we're given a clock with the right frequency for this
* timer, otherwise warn but keep going with the setup anyway, as
* that makes it possible to still boot the kernel, even though
* it may not work correctly (random lockups, etc).
* The reason behind this is that having an early UART may not be
* possible for everyone and this gives a chance to retrieve kmsg
* for eventual debugging even on consumer devices.
*/
freq = timer_of_rate(&to);
if (freq > 13000000)
WARN(1, "Requested unsupported timer frequency %u\n", freq);
/* Clock input is 26MHz, set DIV2 to achieve 13MHz clock */
val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to);
val &= ~CPUX_CLK_DIV_MASK;
val |= CPUX_CLK_DIV2;
mtk_cpux_writel(val, CPUX_IDX_GLOBAL_CTRL, &to);
/* Enable all CPUXGPT timers */
val = mtk_cpux_readl(CPUX_IDX_GLOBAL_CTRL, &to);
mtk_cpux_writel(val | CPUX_ENABLE, CPUX_IDX_GLOBAL_CTRL, &to);
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
TIMER_SYNC_TICKS, 0xffffffff);
return 0;
}
TIMER_OF_DECLARE(mtk_mt6795, "mediatek,mt6795-systimer", mtk_cpux_init);
|
linux-master
|
drivers/clocksource/timer-mediatek-cpux.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Clock event driver for the CS5535/CS5536
*
* Copyright (C) 2006, Advanced Micro Devices, Inc.
* Copyright (C) 2007 Andres Salomon <[email protected]>
* Copyright (C) 2009 Andres Salomon <[email protected]>
*
* The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
*/
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cs5535.h>
#include <linux/clockchips.h>
#define DRV_NAME "cs5535-clockevt"
static int timer_irq;
module_param_hw_named(irq, timer_irq, int, irq, 0644);
MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
/*
* We are using the 32.768kHz input clock - it's the only one that has the
* ranges we find desirable. The following table lists the suitable
* divisors and the associated Hz, minimum interval and the maximum interval:
*
* Divisor Hz Min Delta (s) Max Delta (s)
* 1 32768 .00048828125 2.000
* 2 16384 .0009765625 4.000
* 4 8192 .001953125 8.000
* 8 4096 .00390625 16.000
* 16 2048 .0078125 32.000
* 32 1024 .015625 64.000
* 64 512 .03125 128.000
* 128 256 .0625 256.000
* 256 128 .125 512.000
*/
static struct cs5535_mfgpt_timer *cs5535_event_clock;
/* Selected from the table above */
#define MFGPT_DIVISOR 16
#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
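/*
* Worked example (illustrative only, assuming HZ = 100): with the divisor
* of 16 selected above, MFGPT_HZ is 32768 / 16 = 2048, so one tick is
* roughly 488 us and MFGPT_PERIODIC is 2048 / 100 = 20 ticks per jiffy.
* The 0xF..0xFFFE tick range passed to clockevents_config_and_register()
* below therefore corresponds to a minimum delta of about 7.3 ms and a
* maximum of about 32 s, in line with the "16" row of the table above.
*/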
/*
* The MFGPT timers on the CS5536 provide us with suitable timers to use
* as clock event sources - not as good as a HPET or APIC, but certainly
* better than the PIT. This isn't a general purpose MFGPT driver, but
* a simplified one designed specifically to act as a clock event source.
* For full details about the MFGPT, please consult the CS5536 data sheet.
*/
static void disable_timer(struct cs5535_mfgpt_timer *timer)
{
/* avoid races by clearing CMP1 and CMP2 unconditionally */
cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
(uint16_t) ~MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP1 |
MFGPT_SETUP_CMP2);
}
static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta)
{
cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, delta);
cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
}
static int mfgpt_shutdown(struct clock_event_device *evt)
{
disable_timer(cs5535_event_clock);
return 0;
}
static int mfgpt_set_periodic(struct clock_event_device *evt)
{
disable_timer(cs5535_event_clock);
start_timer(cs5535_event_clock, MFGPT_PERIODIC);
return 0;
}
static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
{
start_timer(cs5535_event_clock, delta);
return 0;
}
static struct clock_event_device cs5535_clockevent = {
.name = DRV_NAME,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = mfgpt_shutdown,
.set_state_periodic = mfgpt_set_periodic,
.set_state_oneshot = mfgpt_shutdown,
.tick_resume = mfgpt_shutdown,
.set_next_event = mfgpt_next_event,
.rating = 250,
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
{
uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP);
/* See if the interrupt was for us */
if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
return IRQ_NONE;
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
if (clockevent_state_detached(&cs5535_clockevent) ||
clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0);
/* Restart the clock in periodic mode */
if (clockevent_state_periodic(&cs5535_clockevent))
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP,
MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
cs5535_clockevent.event_handler(&cs5535_clockevent);
return IRQ_HANDLED;
}
static int __init cs5535_mfgpt_init(void)
{
unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED;
struct cs5535_mfgpt_timer *timer;
int ret;
uint16_t val;
timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
if (!timer) {
printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
return -ENODEV;
}
cs5535_event_clock = timer;
/* Set up the IRQ on the MFGPT side */
if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
timer_irq);
goto err_timer;
}
/* And register it with the kernel */
ret = request_irq(timer_irq, mfgpt_tick, flags, DRV_NAME, timer);
if (ret) {
printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
goto err_irq;
}
/* Set the clock scale and enable the event mode for CMP2 */
val = MFGPT_SCALE | (3 << 8);
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
/* Set up the clock event */
printk(KERN_INFO DRV_NAME
": Registering MFGPT timer as a clock event, using IRQ %d\n",
timer_irq);
clockevents_config_and_register(&cs5535_clockevent, MFGPT_HZ,
0xF, 0xFFFE);
return 0;
err_irq:
cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
err_timer:
cs5535_mfgpt_free_timer(cs5535_event_clock);
printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
return -EIO;
}
module_init(cs5535_mfgpt_init);
MODULE_AUTHOR("Andres Salomon <[email protected]>");
MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/clocksource/timer-cs5535.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Spreadtrum Communications Inc.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include "timer-of.h"
#define TIMER_NAME "sprd_timer"
#define TIMER_LOAD_LO 0x0
#define TIMER_LOAD_HI 0x4
#define TIMER_VALUE_LO 0x8
#define TIMER_VALUE_HI 0xc
#define TIMER_CTL 0x10
#define TIMER_CTL_PERIOD_MODE BIT(0)
#define TIMER_CTL_ENABLE BIT(1)
#define TIMER_CTL_64BIT_WIDTH BIT(16)
#define TIMER_INT 0x14
#define TIMER_INT_EN BIT(0)
#define TIMER_INT_RAW_STS BIT(1)
#define TIMER_INT_MASK_STS BIT(2)
#define TIMER_INT_CLR BIT(3)
#define TIMER_VALUE_SHDW_LO 0x18
#define TIMER_VALUE_SHDW_HI 0x1c
#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
static void sprd_timer_enable(void __iomem *base, u32 flag)
{
u32 val = readl_relaxed(base + TIMER_CTL);
val |= TIMER_CTL_ENABLE;
if (flag & TIMER_CTL_64BIT_WIDTH)
val |= TIMER_CTL_64BIT_WIDTH;
else
val &= ~TIMER_CTL_64BIT_WIDTH;
if (flag & TIMER_CTL_PERIOD_MODE)
val |= TIMER_CTL_PERIOD_MODE;
else
val &= ~TIMER_CTL_PERIOD_MODE;
writel_relaxed(val, base + TIMER_CTL);
}
static void sprd_timer_disable(void __iomem *base)
{
u32 val = readl_relaxed(base + TIMER_CTL);
val &= ~TIMER_CTL_ENABLE;
writel_relaxed(val, base + TIMER_CTL);
}
static void sprd_timer_update_counter(void __iomem *base, unsigned long cycles)
{
writel_relaxed(cycles & TIMER_VALUE_LO_MASK, base + TIMER_LOAD_LO);
writel_relaxed(0, base + TIMER_LOAD_HI);
}
static void sprd_timer_enable_interrupt(void __iomem *base)
{
writel_relaxed(TIMER_INT_EN, base + TIMER_INT);
}
static void sprd_timer_clear_interrupt(void __iomem *base)
{
u32 val = readl_relaxed(base + TIMER_INT);
val |= TIMER_INT_CLR;
writel_relaxed(val, base + TIMER_INT);
}
static int sprd_timer_set_next_event(unsigned long cycles,
struct clock_event_device *ce)
{
struct timer_of *to = to_timer_of(ce);
sprd_timer_disable(timer_of_base(to));
sprd_timer_update_counter(timer_of_base(to), cycles);
sprd_timer_enable(timer_of_base(to), 0);
return 0;
}
static int sprd_timer_set_periodic(struct clock_event_device *ce)
{
struct timer_of *to = to_timer_of(ce);
sprd_timer_disable(timer_of_base(to));
sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
return 0;
}
static int sprd_timer_shutdown(struct clock_event_device *ce)
{
struct timer_of *to = to_timer_of(ce);
sprd_timer_disable(timer_of_base(to));
return 0;
}
static irqreturn_t sprd_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = (struct clock_event_device *)dev_id;
struct timer_of *to = to_timer_of(ce);
sprd_timer_clear_interrupt(timer_of_base(to));
if (clockevent_state_oneshot(ce))
sprd_timer_disable(timer_of_base(to));
ce->event_handler(ce);
return IRQ_HANDLED;
}
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = TIMER_NAME,
.rating = 300,
.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = sprd_timer_shutdown,
.set_state_periodic = sprd_timer_set_periodic,
.set_next_event = sprd_timer_set_next_event,
.cpumask = cpu_possible_mask,
},
.of_irq = {
.handler = sprd_timer_interrupt,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
};
static int __init sprd_timer_init(struct device_node *np)
{
int ret;
ret = timer_of_init(np, &to);
if (ret)
return ret;
sprd_timer_enable_interrupt(timer_of_base(&to));
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
1, UINT_MAX);
return 0;
}
static struct timer_of suspend_to = {
.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};
static u64 sprd_suspend_timer_read(struct clocksource *cs)
{
return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
TIMER_VALUE_SHDW_LO) & cs->mask;
}
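/*
* Illustrative note (not from the original driver): the suspend timer
* appears to count down from TIMER_VALUE_LO_MASK, so the shadow value is
* inverted to present the clocksource core with an up-counting value.
* For example, a raw reading of 0xfffffff0 becomes
* ~0xfffffff0 & 0xffffffff = 0x0000000f.
*/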
static int sprd_suspend_timer_enable(struct clocksource *cs)
{
sprd_timer_update_counter(timer_of_base(&suspend_to),
TIMER_VALUE_LO_MASK);
sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
return 0;
}
static void sprd_suspend_timer_disable(struct clocksource *cs)
{
sprd_timer_disable(timer_of_base(&suspend_to));
}
static struct clocksource suspend_clocksource = {
.name = "sprd_suspend_timer",
.rating = 200,
.read = sprd_suspend_timer_read,
.enable = sprd_suspend_timer_enable,
.disable = sprd_suspend_timer_disable,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static int __init sprd_suspend_timer_init(struct device_node *np)
{
int ret;
ret = timer_of_init(np, &suspend_to);
if (ret)
return ret;
clocksource_register_hz(&suspend_clocksource,
timer_of_rate(&suspend_to));
return 0;
}
TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer",
sprd_suspend_timer_init);
|
linux-master
|
drivers/clocksource/timer-sprd.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Clocksource driver for the synthetic counter and timers
* provided by the Hyper-V hypervisor to guest VMs, as described
* in the Hyper-V Top Level Functional Spec (TLFS). This driver
* is instruction set architecture independent.
*
* Copyright (C) 2019, Microsoft, Inc.
*
* Author: Michael Kelley <[email protected]>
*/
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
static u64 hv_sched_clock_offset __ro_after_init;
/*
* If false, we're using the old mechanism for stimer0 interrupts
* where it sends a VMbus message when it expires. The old
* mechanism is used when running on older versions of Hyper-V
* that don't support Direct Mode. While Hyper-V provides
* four stimers per CPU, Linux uses only stimer0.
*
* Because Direct Mode does not require processing a VMbus
* message, stimer interrupts can be enabled earlier in the
* process of booting a CPU, and consistent with when timer
* interrupts are enabled for other clocksource drivers.
* However, for legacy versions of Hyper-V when Direct Mode
* is not enabled, setting up stimer interrupts must be
* delayed until VMbus is initialized and can process the
* interrupt message.
*/
static bool direct_mode_enabled;
static int stimer0_irq = -1;
static int stimer0_message_sint;
static __maybe_unused DEFINE_PER_CPU(long, stimer0_evt);
/*
* Common code for stimer0 interrupts coming via Direct Mode or
* as a VMbus message.
*/
void hv_stimer0_isr(void)
{
struct clock_event_device *ce;
ce = this_cpu_ptr(hv_clock_event);
ce->event_handler(ce);
}
EXPORT_SYMBOL_GPL(hv_stimer0_isr);
/*
* stimer0 interrupt handler for architectures that support
* per-cpu interrupts, which also implies Direct Mode.
*/
static irqreturn_t __maybe_unused hv_stimer0_percpu_isr(int irq, void *dev_id)
{
hv_stimer0_isr();
return IRQ_HANDLED;
}
static int hv_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u64 current_tick;
current_tick = hv_read_reference_counter();
current_tick += delta;
hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
if (direct_mode_enabled && stimer0_irq >= 0)
disable_percpu_irq(stimer0_irq);
return 0;
}
static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
union hv_stimer_config timer_cfg;
timer_cfg.as_uint64 = 0;
timer_cfg.enable = 1;
timer_cfg.auto_enable = 1;
if (direct_mode_enabled) {
/*
* When it expires, the timer will directly interrupt
* on the specified hardware vector/IRQ.
*/
timer_cfg.direct_mode = 1;
timer_cfg.apic_vector = HYPERV_STIMER0_VECTOR;
if (stimer0_irq >= 0)
enable_percpu_irq(stimer0_irq, IRQ_TYPE_NONE);
} else {
/*
* When it expires, the timer will generate a VMbus message,
* to be handled by the normal VMbus interrupt handler.
*/
timer_cfg.direct_mode = 0;
timer_cfg.sintx = stimer0_message_sint;
}
hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
/*
* hv_stimer_init - Per-cpu initialization of the clockevent
*/
static int hv_stimer_init(unsigned int cpu)
{
struct clock_event_device *ce;
if (!hv_clock_event)
return 0;
ce = per_cpu_ptr(hv_clock_event, cpu);
ce->name = "Hyper-V clockevent";
ce->features = CLOCK_EVT_FEAT_ONESHOT;
ce->cpumask = cpumask_of(cpu);
ce->rating = 1000;
ce->set_state_shutdown = hv_ce_shutdown;
ce->set_state_oneshot = hv_ce_set_oneshot;
ce->set_next_event = hv_ce_set_next_event;
clockevents_config_and_register(ce,
HV_CLOCK_HZ,
HV_MIN_DELTA_TICKS,
HV_MAX_MAX_DELTA_TICKS);
return 0;
}
/*
* hv_stimer_cleanup - Per-cpu cleanup of the clockevent
*/
int hv_stimer_cleanup(unsigned int cpu)
{
struct clock_event_device *ce;
if (!hv_clock_event)
return 0;
/*
* In the legacy case where Direct Mode is not enabled
* (which can only be on x86/64), stimer cleanup happens
* relatively early in the CPU offlining process. We
* must unbind the stimer-based clockevent device so
* that the LAPIC timer can take over until clockevents
* are no longer needed in the offlining process. Note
* that clockevents_unbind_device() eventually calls
* hv_ce_shutdown().
*
* The unbind should not be done when Direct Mode is
* enabled because we may be on an architecture where
* there are no other clockevent devices to fallback to.
*/
ce = per_cpu_ptr(hv_clock_event, cpu);
if (direct_mode_enabled)
hv_ce_shutdown(ce);
else
clockevents_unbind_device(ce, cpu);
return 0;
}
EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
/*
* These placeholders are overridden by arch specific code on
* architectures that need special setup of the stimer0 IRQ because
* they don't support per-cpu IRQs (such as x86/x64).
*/
void __weak hv_setup_stimer0_handler(void (*handler)(void))
{
};
void __weak hv_remove_stimer0_handler(void)
{
};
#ifdef CONFIG_ACPI
/* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
static int hv_setup_stimer0_irq(void)
{
int ret;
ret = acpi_register_gsi(NULL, HYPERV_STIMER0_VECTOR,
ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
if (ret < 0) {
pr_err("Can't register Hyper-V stimer0 GSI. Error %d", ret);
return ret;
}
stimer0_irq = ret;
ret = request_percpu_irq(stimer0_irq, hv_stimer0_percpu_isr,
"Hyper-V stimer0", &stimer0_evt);
if (ret) {
pr_err("Can't request Hyper-V stimer0 IRQ %d. Error %d",
stimer0_irq, ret);
acpi_unregister_gsi(stimer0_irq);
stimer0_irq = -1;
}
return ret;
}
static void hv_remove_stimer0_irq(void)
{
if (stimer0_irq == -1) {
hv_remove_stimer0_handler();
} else {
free_percpu_irq(stimer0_irq, &stimer0_evt);
acpi_unregister_gsi(stimer0_irq);
stimer0_irq = -1;
}
}
#else
static int hv_setup_stimer0_irq(void)
{
return 0;
}
static void hv_remove_stimer0_irq(void)
{
}
#endif
/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
int hv_stimer_alloc(bool have_percpu_irqs)
{
int ret;
/*
* Synthetic timers are always available except on old versions of
* Hyper-V on x86. In that case, return an error; Linux will use a
* clockevent based on the emulated LAPIC timer hardware.
*/
if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
return -EINVAL;
hv_clock_event = alloc_percpu(struct clock_event_device);
if (!hv_clock_event)
return -ENOMEM;
direct_mode_enabled = ms_hyperv.misc_features &
HV_STIMER_DIRECT_MODE_AVAILABLE;
/*
* If Direct Mode isn't enabled, the remainder of the initialization
* is done later by hv_stimer_legacy_init()
*/
if (!direct_mode_enabled)
return 0;
if (have_percpu_irqs) {
ret = hv_setup_stimer0_irq();
if (ret)
goto free_clock_event;
} else {
hv_setup_stimer0_handler(hv_stimer0_isr);
}
/*
* Since we are in Direct Mode, stimer initialization
* can be done now with a CPUHP value in the same range
* as other clockevent devices.
*/
ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
"clockevents/hyperv/stimer:starting",
hv_stimer_init, hv_stimer_cleanup);
if (ret < 0) {
hv_remove_stimer0_irq();
goto free_clock_event;
}
return ret;
free_clock_event:
free_percpu(hv_clock_event);
hv_clock_event = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(hv_stimer_alloc);
/*
* hv_stimer_legacy_init -- Called from the VMbus driver to handle
* the case when Direct Mode is not enabled, and the stimer
* must be initialized late in the CPU onlining process.
*
*/
void hv_stimer_legacy_init(unsigned int cpu, int sint)
{
if (direct_mode_enabled)
return;
/*
* This function gets called by each vCPU, so setting the
* global stimer_message_sint value each time is conceptually
* not ideal, but the value passed in is always the same and
* it avoids introducing yet another interface into this
* clocksource driver just to set the sint in the legacy case.
*/
stimer0_message_sint = sint;
(void)hv_stimer_init(cpu);
}
EXPORT_SYMBOL_GPL(hv_stimer_legacy_init);
/*
* hv_stimer_legacy_cleanup -- Called from the VMbus driver to
* handle the case when Direct Mode is not enabled, and the
* stimer must be cleaned up early in the CPU offlining
* process.
*/
void hv_stimer_legacy_cleanup(unsigned int cpu)
{
if (direct_mode_enabled)
return;
(void)hv_stimer_cleanup(cpu);
}
EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
/*
* Do a global cleanup of clockevents for the cases of kexec and
* vmbus exit
*/
void hv_stimer_global_cleanup(void)
{
int cpu;
/*
* hv_stimer_legacy_cleanup() will stop the stimer if Direct
* Mode is not enabled, and fall back to the LAPIC timer.
*/
for_each_present_cpu(cpu) {
hv_stimer_legacy_cleanup(cpu);
}
if (!hv_clock_event)
return;
if (direct_mode_enabled) {
cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
hv_remove_stimer0_irq();
stimer0_irq = -1;
}
free_percpu(hv_clock_event);
hv_clock_event = NULL;
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
static __always_inline u64 read_hv_clock_msr(void)
{
/*
* Read the partition counter to get the current tick count. This count
* is set to 0 when the partition is created and is incremented in 100
* nanosecond units.
*
* Use hv_raw_get_register() because this function is used from
* noinstr. Notably, while HV_REGISTER_TIME_REF_COUNT is a synthetic
* register, it doesn't need the GHCB path.
*/
return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT);
}
/*
* Code and definitions for the Hyper-V clocksources. Two
* clocksources are defined: one that reads the Hyper-V defined MSR, and
* the other that uses the TSC reference page feature as defined in the
* TLFS. The MSR version is for compatibility with old versions of
* Hyper-V and 32-bit x86. The TSC reference page version is preferred.
*/
static union {
struct ms_hyperv_tsc_page page;
u8 reserved[PAGE_SIZE];
} tsc_pg __bss_decrypted __aligned(PAGE_SIZE);
static struct ms_hyperv_tsc_page *tsc_page = &tsc_pg.page;
static unsigned long tsc_pfn;
unsigned long hv_get_tsc_pfn(void)
{
return tsc_pfn;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_pfn);
struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
return tsc_page;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);
static __always_inline u64 read_hv_clock_tsc(void)
{
u64 cur_tsc, time;
/*
* The Hyper-V Top-Level Function Spec (TLFS), section Timers,
* subsection Reference Counter, guarantees that the TSC and MSR
* times are in sync and monotonic. Therefore we can fall back
* to the MSR in case the TSC page indicates unavailability.
*/
if (!hv_read_tsc_page_tsc(tsc_page, &cur_tsc, &time))
time = read_hv_clock_msr();
return time;
}
static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
{
return read_hv_clock_tsc();
}
static u64 noinstr read_hv_sched_clock_tsc(void)
{
return (read_hv_clock_tsc() - hv_sched_clock_offset) *
(NSEC_PER_SEC / HV_CLOCK_HZ);
}
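/*
* Illustrative note (not from the original driver): the Hyper-V reference
* counter ticks in 100 ns units (HV_CLOCK_HZ is 10 MHz), so
* NSEC_PER_SEC / HV_CLOCK_HZ is 100 and the sched clock above reports the
* ticks elapsed since hv_sched_clock_offset, scaled to nanoseconds.
*/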
static void suspend_hv_clock_tsc(struct clocksource *arg)
{
union hv_reference_tsc_msr tsc_msr;
/* Disable the TSC page */
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.enable = 0;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}
static void resume_hv_clock_tsc(struct clocksource *arg)
{
union hv_reference_tsc_msr tsc_msr;
/* Re-enable the TSC page */
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
return 0;
}
#endif
static struct clocksource hyperv_cs_tsc = {
.name = "hyperv_clocksource_tsc_page",
.rating = 500,
.read = read_hv_clock_tsc_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend= suspend_hv_clock_tsc,
.resume = resume_hv_clock_tsc,
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
.enable = hv_cs_enable,
.vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
#else
.vdso_clock_mode = VDSO_CLOCKMODE_NONE,
#endif
};
static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
{
return read_hv_clock_msr();
}
static struct clocksource hyperv_cs_msr = {
.name = "hyperv_clocksource_msr",
.rating = 495,
.read = read_hv_clock_msr_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
* Reference to pv_ops must be inline so objtool
* detection of noinstr violations can work correctly.
*/
#ifdef CONFIG_GENERIC_SCHED_CLOCK
static __always_inline void hv_setup_sched_clock(void *sched_clock)
{
/*
* We're on an architecture with generic sched clock (not x86/x64).
* The Hyper-V sched clock read function returns nanoseconds, not
* the normal 100ns units of the Hyper-V synthetic clock.
*/
sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
}
#elif defined CONFIG_PARAVIRT
static __always_inline void hv_setup_sched_clock(void *sched_clock)
{
/* We're on x86/x64 *and* using PV ops */
paravirt_set_sched_clock(sched_clock);
}
#else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
#endif /* CONFIG_GENERIC_SCHED_CLOCK */
static void __init hv_init_tsc_clocksource(void)
{
union hv_reference_tsc_msr tsc_msr;
/*
* If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
* handles frequency and offset changes due to live migration,
* pause/resume, and other VM management operations. So lower the
* Hyper-V Reference TSC rating, causing the generic TSC to be used.
* TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
* TSC will be preferred over the virtualized ARM64 arch counter.
*/
if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
hyperv_cs_tsc.rating = 250;
hyperv_cs_msr.rating = 245;
}
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return;
hv_read_reference_counter = read_hv_clock_tsc;
/*
* TSC page mapping works differently in root compared to guest.
* - In a guest partition, the guest PFN has to be passed to the
* hypervisor.
* - In the root partition, it's the other way around: it has to map
* the PFN provided by the hypervisor.
* But it can't be mapped right here as it's too early and MMU isn't
* ready yet. So, we only set the enable bit here and will remap the
* page later in hv_remap_tsc_clocksource().
*
* It is worth mentioning that the TSC clocksource read function
* (read_hv_clock_tsc) has an MSR-based fallback mechanism, used when
* the TSC page is zeroed (which is the case until the PFN is remapped),
* so the TSC clocksource will work even without the real TSC page
* mapped.
*/
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
if (hv_root_partition)
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
/*
* If TSC is invariant, then let it stay as the sched clock since it
* will be faster than reading the TSC page. But if not invariant, use
* the TSC page so that live migrations across hosts with different
* frequencies are handled correctly.
*/
if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT)) {
hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_tsc);
}
}
void __init hv_init_clocksource(void)
{
/*
* Try to set up the TSC page clocksource, then the MSR clocksource.
* At least one of these will always be available except on very old
* versions of Hyper-V on x86. In that case we won't have a Hyper-V
* clocksource, but Linux will still run with a clocksource based
* on the emulated PIT or LAPIC timer.
*
* Never use the MSR clocksource as sched clock. It's too slow.
* Better to use the native sched clock as the fallback.
*/
hv_init_tsc_clocksource();
if (ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
}
void __init hv_remap_tsc_clocksource(void)
{
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return;
if (!hv_root_partition) {
WARN(1, "%s: attempt to remap TSC page in guest partition\n",
__func__);
return;
}
tsc_page = memremap(tsc_pfn << HV_HYP_PAGE_SHIFT, sizeof(tsc_pg),
MEMREMAP_WB);
if (!tsc_page)
pr_err("Failed to remap Hyper-V TSC page.\n");
}
|
linux-master
|
drivers/clocksource/hyperv_timer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/clocksource/arm_global_timer.c
*
* Copyright (C) 2013 STMicroelectronics (R&D) Limited.
* Author: Stuart Menefy <[email protected]>
* Author: Srinivas Kandagatla <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <asm/cputype.h>
#define GT_COUNTER0 0x00
#define GT_COUNTER1 0x04
#define GT_CONTROL 0x08
#define GT_CONTROL_TIMER_ENABLE BIT(0) /* this bit is NOT banked */
#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
#define GT_CONTROL_PRESCALER_SHIFT 8
#define GT_CONTROL_PRESCALER_MAX 0xF
#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
GT_CONTROL_PRESCALER_SHIFT)
#define GT_INT_STATUS 0x0c
#define GT_INT_STATUS_EVENT_FLAG BIT(0)
#define GT_COMP0 0x10
#define GT_COMP1 0x14
#define GT_AUTO_INC 0x18
#define MAX_F_ERR 50
/*
* We are expecting to be clocked by the ARM peripheral clock.
*
 * Note: it is assumed we are using a prescaler value of zero, so the
 * peripheral clock rate is the unit for all operations.
*/
static void __iomem *gt_base;
static struct notifier_block gt_clk_rate_change_nb;
static u32 gt_psv_new, gt_psv_bck, gt_target_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;
/*
* To get the value from the Global Timer Counter register proceed as follows:
* 1. Read the upper 32-bit timer counter register
* 2. Read the lower 32-bit timer counter register
* 3. Read the upper 32-bit timer counter register again. If the value is
* different to the 32-bit upper value read previously, go back to step 2.
* Otherwise the 64-bit timer counter value is correct.
*/
static u64 notrace _gt_counter_read(void)
{
u64 counter;
u32 lower;
u32 upper, old_upper;
upper = readl_relaxed(gt_base + GT_COUNTER1);
do {
old_upper = upper;
lower = readl_relaxed(gt_base + GT_COUNTER0);
upper = readl_relaxed(gt_base + GT_COUNTER1);
} while (upper != old_upper);
counter = upper;
counter <<= 32;
counter |= lower;
return counter;
}
static u64 gt_counter_read(void)
{
return _gt_counter_read();
}
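/*
 * A minimal, generic sketch of the same high/low/high read pattern, for any
 * 64-bit free-running counter exposed as two 32-bit MMIO words (the register
 * arguments here are hypothetical):
 */
static inline u64 split_counter_read_sketch(void __iomem *lo_reg,
					    void __iomem *hi_reg)
{
	u32 lo, hi, old_hi;

	hi = readl_relaxed(hi_reg);
	do {
		old_hi = hi;
		lo = readl_relaxed(lo_reg);
		hi = readl_relaxed(hi_reg);
	} while (hi != old_hi);		/* retry if the upper word changed */

	return ((u64)hi << 32) | lo;
}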
/*
* To ensure that updates to comparator value register do not set the
* Interrupt Status Register proceed as follows:
* 1. Clear the Comp Enable bit in the Timer Control Register.
* 2. Write the lower 32-bit Comparator Value Register.
* 3. Write the upper 32-bit Comparator Value Register.
* 4. Set the Comp Enable bit and, if necessary, the IRQ enable bit.
*/
static void gt_compare_set(unsigned long delta, int periodic)
{
u64 counter = gt_counter_read();
unsigned long ctrl;
counter += delta;
ctrl = readl(gt_base + GT_CONTROL);
ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE |
GT_CONTROL_AUTO_INC);
ctrl |= GT_CONTROL_TIMER_ENABLE;
writel_relaxed(ctrl, gt_base + GT_CONTROL);
writel_relaxed(lower_32_bits(counter), gt_base + GT_COMP0);
writel_relaxed(upper_32_bits(counter), gt_base + GT_COMP1);
if (periodic) {
writel_relaxed(delta, gt_base + GT_AUTO_INC);
ctrl |= GT_CONTROL_AUTO_INC;
}
ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE;
writel_relaxed(ctrl, gt_base + GT_CONTROL);
}
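/*
 * Design note: gt_compare_set() clears the comparator and IRQ enable bits
 * before writing the two comparator halves, so the hardware can never see a
 * half-updated 64-bit comparator value and raise a spurious match. They are
 * re-enabled only once both halves (and, for periodic mode, the
 * auto-increment value) have been programmed.
 */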
static int gt_clockevent_shutdown(struct clock_event_device *evt)
{
unsigned long ctrl;
ctrl = readl(gt_base + GT_CONTROL);
ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE |
GT_CONTROL_AUTO_INC);
writel(ctrl, gt_base + GT_CONTROL);
return 0;
}
static int gt_clockevent_set_periodic(struct clock_event_device *evt)
{
gt_compare_set(DIV_ROUND_CLOSEST(gt_target_rate, HZ), 1);
return 0;
}
static int gt_clockevent_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
gt_compare_set(evt, 0);
return 0;
}
static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
if (!(readl_relaxed(gt_base + GT_INT_STATUS) &
GT_INT_STATUS_EVENT_FLAG))
return IRQ_NONE;
	/*
	 * ERRATA 740657 (Global Timer can send 2 interrupts for
	 * the same event in single-shot mode)
	 * Workaround:
	 * Either disable single-shot mode,
	 * or
	 * modify the interrupt handler to avoid the
	 * offending sequence. This is achieved by clearing
	 * the Global Timer flag _after_ having incremented
	 * the Comparator register value to a higher value.
*/
if (clockevent_state_oneshot(evt))
gt_compare_set(ULONG_MAX, 0);
writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int gt_starting_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(gt_evt);
clk->name = "arm_global_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERCPU;
clk->set_state_shutdown = gt_clockevent_shutdown;
clk->set_state_periodic = gt_clockevent_set_periodic;
clk->set_state_oneshot = gt_clockevent_shutdown;
clk->set_state_oneshot_stopped = gt_clockevent_shutdown;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
clk->rating = 300;
clk->irq = gt_ppi;
clockevents_config_and_register(clk, gt_target_rate,
1, 0xffffffff);
enable_percpu_irq(clk->irq, IRQ_TYPE_NONE);
return 0;
}
static int gt_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(gt_evt);
gt_clockevent_shutdown(clk);
disable_percpu_irq(clk->irq);
return 0;
}
static u64 gt_clocksource_read(struct clocksource *cs)
{
return gt_counter_read();
}
static void gt_resume(struct clocksource *cs)
{
unsigned long ctrl;
ctrl = readl(gt_base + GT_CONTROL);
if (!(ctrl & GT_CONTROL_TIMER_ENABLE))
/* re-enable timer on resume */
writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
}
static struct clocksource gt_clocksource = {
.name = "arm_global_timer",
.rating = 300,
.read = gt_clocksource_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = gt_resume,
};
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
static u64 notrace gt_sched_clock_read(void)
{
return _gt_counter_read();
}
#endif
static unsigned long gt_read_long(void)
{
return readl_relaxed(gt_base + GT_COUNTER0);
}
static struct delay_timer gt_delay_timer = {
.read_current_timer = gt_read_long,
};
static void gt_write_presc(u32 psv)
{
u32 reg;
reg = readl(gt_base + GT_CONTROL);
reg &= ~GT_CONTROL_PRESCALER_MASK;
reg |= psv << GT_CONTROL_PRESCALER_SHIFT;
writel(reg, gt_base + GT_CONTROL);
}
static u32 gt_read_presc(void)
{
u32 reg;
reg = readl(gt_base + GT_CONTROL);
reg &= GT_CONTROL_PRESCALER_MASK;
return reg >> GT_CONTROL_PRESCALER_SHIFT;
}
static void __init gt_delay_timer_init(void)
{
gt_delay_timer.freq = gt_target_rate;
	register_current_timer_delay(&gt_delay_timer);
}
static int __init gt_clocksource_init(void)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
writel(((CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) <<
GT_CONTROL_PRESCALER_SHIFT)
| GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
#endif
	return clocksource_register_hz(&gt_clocksource, gt_target_rate);
}
static int gt_clk_rate_change_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
switch (event) {
case PRE_RATE_CHANGE:
{
int psv;
psv = DIV_ROUND_CLOSEST(ndata->new_rate,
gt_target_rate);
if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
return NOTIFY_BAD;
psv--;
/* prescaler within legal range? */
if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
return NOTIFY_BAD;
/*
* store timer clock ctrl register so we can restore it in case
* of an abort.
*/
gt_psv_bck = gt_read_presc();
gt_psv_new = psv;
/* scale down: adjust divider in post-change notification */
if (ndata->new_rate < ndata->old_rate)
return NOTIFY_DONE;
/* scale up: adjust divider now - before frequency change */
gt_write_presc(psv);
break;
}
case POST_RATE_CHANGE:
/* scale up: pre-change notification did the adjustment */
if (ndata->new_rate > ndata->old_rate)
return NOTIFY_OK;
/* scale down: adjust divider now - after frequency change */
gt_write_presc(gt_psv_new);
break;
case ABORT_RATE_CHANGE:
/* we have to undo the adjustment in case we scale up */
if (ndata->new_rate < ndata->old_rate)
return NOTIFY_OK;
/* restore original register value */
gt_write_presc(gt_psv_bck);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_DONE;
}
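/*
 * Worked example with illustrative numbers: with gt_target_rate = 200 MHz
 * and the parent clock changing from 400 MHz to 800 MHz, psv becomes
 * DIV_ROUND_CLOSEST(800 MHz, 200 MHz) = 4, so a prescaler field of 3
 * (divide by 4) keeps the timer at 200 MHz. Since this is a scale up, the
 * divider is raised in the PRE_RATE_CHANGE notification; for a scale down
 * it would be lowered only in POST_RATE_CHANGE, so the intermediate timer
 * rate stays at or below the target.
 */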
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
int err = 0;
/*
	 * In A9 r2p0 the comparators for each processor with the global timer
	 * fire when the timer value is greater than or equal to the comparator
	 * value. In previous revisions the comparators fired only when the
	 * timer value was equal to the comparator value.
*/
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
&& (read_cpuid_id() & 0xf0000f) < 0x200000) {
		pr_warn("global-timer: no support for this cpu version.\n");
return -ENOSYS;
}
gt_ppi = irq_of_parse_and_map(np, 0);
if (!gt_ppi) {
pr_warn("global-timer: unable to parse irq\n");
return -EINVAL;
}
gt_base = of_iomap(np, 0);
if (!gt_base) {
pr_warn("global-timer: invalid base address\n");
return -ENXIO;
}
gt_clk = of_clk_get(np, 0);
if (!IS_ERR(gt_clk)) {
err = clk_prepare_enable(gt_clk);
if (err)
goto out_unmap;
} else {
pr_warn("global-timer: clk not found\n");
err = -EINVAL;
goto out_unmap;
}
gt_clk_rate = clk_get_rate(gt_clk);
gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
gt_clk_rate_change_nb.notifier_call =
gt_clk_rate_change_cb;
	err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
if (err) {
pr_warn("Unable to register clock notifier\n");
goto out_clk;
}
gt_evt = alloc_percpu(struct clock_event_device);
if (!gt_evt) {
pr_warn("global-timer: can't allocate memory\n");
err = -ENOMEM;
goto out_clk_nb;
}
err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
"gt", gt_evt);
if (err) {
pr_warn("global-timer: can't register interrupt %d (%d)\n",
gt_ppi, err);
goto out_free;
}
/* Register and immediately configure the timer on the boot CPU */
err = gt_clocksource_init();
if (err)
goto out_irq;
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
"clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
if (err)
goto out_irq;
gt_delay_timer_init();
return 0;
out_irq:
free_percpu_irq(gt_ppi, gt_evt);
out_free:
free_percpu(gt_evt);
out_clk_nb:
	clk_notifier_unregister(gt_clk, &gt_clk_rate_change_nb);
out_clk:
clk_disable_unprepare(gt_clk);
out_unmap:
iounmap(gt_base);
WARN(err, "ARM Global timer register failed (%d)\n", err);
return err;
}
/* Only tested on r2p2 and r3p0 */
TIMER_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer",
global_timer_of_register);
|
linux-master
|
drivers/clocksource/arm_global_timer.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* 64-bit Periodic Interval Timer driver
*
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
*
* Author: Claudiu Beznea <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#define MCHP_PIT64B_CR 0x00 /* Control Register */
#define MCHP_PIT64B_CR_START BIT(0)
#define MCHP_PIT64B_CR_SWRST BIT(8)
#define MCHP_PIT64B_MR 0x04 /* Mode Register */
#define MCHP_PIT64B_MR_CONT BIT(0)
#define MCHP_PIT64B_MR_ONE_SHOT (0)
#define MCHP_PIT64B_MR_SGCLK BIT(3)
#define MCHP_PIT64B_MR_PRES GENMASK(11, 8)
#define MCHP_PIT64B_LSB_PR 0x08 /* LSB Period Register */
#define MCHP_PIT64B_MSB_PR 0x0C /* MSB Period Register */
#define MCHP_PIT64B_IER 0x10 /* Interrupt Enable Register */
#define MCHP_PIT64B_IER_PERIOD BIT(0)
#define MCHP_PIT64B_ISR 0x1C /* Interrupt Status Register */
#define MCHP_PIT64B_TLSBR 0x20 /* Timer LSB Register */
#define MCHP_PIT64B_TMSBR 0x24 /* Timer MSB Register */
#define MCHP_PIT64B_PRES_MAX 0x10
#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0)
#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8))
#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8)
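/*
 * Example: MCHP_PIT64B_PRES_TO_MODE(3) places prescaler 3 into bits 11:8 of
 * the mode register (0x300), and MCHP_PIT64B_MODE_TO_PRES(0x300) recovers 3.
 * The hardware divides the selected clock by (prescaler + 1).
 */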
#define MCHP_PIT64B_DEF_FREQ 5000000UL /* 5 MHz */
#define MCHP_PIT64B_NAME "pit64b"
/**
* struct mchp_pit64b_timer - PIT64B timer data structure
* @base: base address of PIT64B hardware block
* @pclk: PIT64B's peripheral clock
* @gclk: PIT64B's generic clock
* @mode: precomputed value for mode register
*/
struct mchp_pit64b_timer {
void __iomem *base;
struct clk *pclk;
struct clk *gclk;
u32 mode;
};
/**
* struct mchp_pit64b_clkevt - PIT64B clockevent data structure
* @timer: PIT64B timer
* @clkevt: clockevent
*/
struct mchp_pit64b_clkevt {
struct mchp_pit64b_timer timer;
struct clock_event_device clkevt;
};
#define clkevt_to_mchp_pit64b_timer(x) \
((struct mchp_pit64b_timer *)container_of(x,\
struct mchp_pit64b_clkevt, clkevt))
/**
* struct mchp_pit64b_clksrc - PIT64B clocksource data structure
* @timer: PIT64B timer
* @clksrc: clocksource
*/
struct mchp_pit64b_clksrc {
struct mchp_pit64b_timer timer;
struct clocksource clksrc;
};
#define clksrc_to_mchp_pit64b_timer(x) \
((struct mchp_pit64b_timer *)container_of(x,\
struct mchp_pit64b_clksrc, clksrc))
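/*
 * Note: both conversion helpers above cast the container straight to
 * struct mchp_pit64b_timer *. This is only valid because @timer is the first
 * member of struct mchp_pit64b_clkevt and struct mchp_pit64b_clksrc, so the
 * container and its embedded timer share the same address.
 */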
/* Base address for clocksource timer. */
static void __iomem *mchp_pit64b_cs_base;
/* Default cycles for clockevent timer. */
static u64 mchp_pit64b_ce_cycles;
/* Delay timer. */
static struct delay_timer mchp_pit64b_dt;
static inline u64 mchp_pit64b_cnt_read(void __iomem *base)
{
unsigned long flags;
u32 low, high;
raw_local_irq_save(flags);
/*
* When using a 64 bit period TLSB must be read first, followed by the
* read of TMSB. This sequence generates an atomic read of the 64 bit
* timer value whatever the lapse of time between the accesses.
*/
low = readl_relaxed(base + MCHP_PIT64B_TLSBR);
high = readl_relaxed(base + MCHP_PIT64B_TMSBR);
raw_local_irq_restore(flags);
return (((u64)high << 32) | low);
}
static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer,
u64 cycles, u32 mode, u32 irqs)
{
u32 low, high;
low = cycles & MCHP_PIT64B_LSBMASK;
high = cycles >> 32;
writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
writel_relaxed(mode | timer->mode, timer->base + MCHP_PIT64B_MR);
writel_relaxed(high, timer->base + MCHP_PIT64B_MSB_PR);
writel_relaxed(low, timer->base + MCHP_PIT64B_LSB_PR);
writel_relaxed(irqs, timer->base + MCHP_PIT64B_IER);
writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR);
}
static void mchp_pit64b_suspend(struct mchp_pit64b_timer *timer)
{
writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
if (timer->mode & MCHP_PIT64B_MR_SGCLK)
clk_disable_unprepare(timer->gclk);
clk_disable_unprepare(timer->pclk);
}
static void mchp_pit64b_resume(struct mchp_pit64b_timer *timer)
{
clk_prepare_enable(timer->pclk);
if (timer->mode & MCHP_PIT64B_MR_SGCLK)
clk_prepare_enable(timer->gclk);
}
static void mchp_pit64b_clksrc_suspend(struct clocksource *cs)
{
struct mchp_pit64b_timer *timer = clksrc_to_mchp_pit64b_timer(cs);
mchp_pit64b_suspend(timer);
}
static void mchp_pit64b_clksrc_resume(struct clocksource *cs)
{
struct mchp_pit64b_timer *timer = clksrc_to_mchp_pit64b_timer(cs);
mchp_pit64b_resume(timer);
mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
}
static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
static u64 notrace mchp_pit64b_sched_read_clk(void)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
static unsigned long notrace mchp_pit64b_dt_read(void)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
if (!clockevent_state_detached(cedev))
mchp_pit64b_suspend(timer);
return 0;
}
static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
if (clockevent_state_shutdown(cedev))
mchp_pit64b_resume(timer);
mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
MCHP_PIT64B_IER_PERIOD);
return 0;
}
static int mchp_pit64b_clkevt_set_oneshot(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
if (clockevent_state_shutdown(cedev))
mchp_pit64b_resume(timer);
mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_ONE_SHOT,
MCHP_PIT64B_IER_PERIOD);
return 0;
}
static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
MCHP_PIT64B_IER_PERIOD);
return 0;
}
static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
{
struct mchp_pit64b_clkevt *irq_data = dev_id;
/* Need to clear the interrupt. */
readl_relaxed(irq_data->timer.base + MCHP_PIT64B_ISR);
irq_data->clkevt.event_handler(&irq_data->clkevt);
return IRQ_HANDLED;
}
static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
u32 max_rate)
{
u32 tmp;
for (*pres = 0; *pres < MCHP_PIT64B_PRES_MAX; (*pres)++) {
tmp = clk_rate / (*pres + 1);
if (tmp <= max_rate)
break;
}
/* Use the biggest prescaler if we didn't match one. */
if (*pres == MCHP_PIT64B_PRES_MAX)
*pres = MCHP_PIT64B_PRES_MAX - 1;
}
/**
* mchp_pit64b_init_mode() - prepare PIT64B mode register value to be used at
* runtime; this includes prescaler and SGCLK bit
* @timer: pointer to pit64b timer to init
* @max_rate: maximum rate that timer's clock could use
*
 * PIT64B timer may be fed by gclk or pclk. When gclk is used, its rate has to
 * be at least 3 times lower than pclk's rate. pclk rate is fixed, gclk rate
 * could be changed via clock APIs. The chosen clock (pclk or gclk) could be
* divided by the internal PIT64B's divider.
*
 * This function first tries to use GCLK by requesting the desired rate from
 * the PMC and then using the internal PIT64B prescaler, if any, to reach the
 * requested rate. If PCLK/GCLK < 3 (a condition required by the PIT64B
 * hardware) the function falls back on using PCLK as the clock source for the
 * PIT64B timer, choosing the highest prescaler in case it doesn't find one
 * matching the requested frequency.
*
* Below is presented the PIT64B block in relation with PMC:
*
* PIT64B
* PMC +------------------------------------+
* +----+ | +-----+ |
* | |-->gclk -->|-->| | +---------+ +-----+ |
* | | | | MUX |--->| Divider |->|timer| |
* | |-->pclk -->|-->| | +---------+ +-----+ |
* +----+ | +-----+ |
* | ^ |
* | sel |
* +------------------------------------+
*
* Where:
* - gclk rate <= pclk rate/3
* - gclk rate could be requested from PMC
* - pclk rate is fixed (cannot be requested from PMC)
*/
static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer,
unsigned long max_rate)
{
unsigned long pclk_rate, diff = 0, best_diff = ULONG_MAX;
long gclk_round = 0;
u32 pres, best_pres = 0;
pclk_rate = clk_get_rate(timer->pclk);
if (!pclk_rate)
return -EINVAL;
timer->mode = 0;
/* Try using GCLK. */
gclk_round = clk_round_rate(timer->gclk, max_rate);
if (gclk_round < 0)
goto pclk;
if (pclk_rate / gclk_round < 3)
goto pclk;
mchp_pit64b_pres_compute(&pres, gclk_round, max_rate);
best_diff = abs(gclk_round / (pres + 1) - max_rate);
best_pres = pres;
if (!best_diff) {
timer->mode |= MCHP_PIT64B_MR_SGCLK;
clk_set_rate(timer->gclk, gclk_round);
goto done;
}
pclk:
/* Check if requested rate could be obtained using PCLK. */
mchp_pit64b_pres_compute(&pres, pclk_rate, max_rate);
diff = abs(pclk_rate / (pres + 1) - max_rate);
if (best_diff > diff) {
/* Use PCLK. */
best_pres = pres;
} else {
/* Use GCLK. */
timer->mode |= MCHP_PIT64B_MR_SGCLK;
clk_set_rate(timer->gclk, gclk_round);
}
done:
timer->mode |= MCHP_PIT64B_PRES_TO_MODE(best_pres);
pr_info("PIT64B: using clk=%s with prescaler %u, freq=%lu [Hz]\n",
timer->mode & MCHP_PIT64B_MR_SGCLK ? "gclk" : "pclk", best_pres,
timer->mode & MCHP_PIT64B_MR_SGCLK ?
gclk_round / (best_pres + 1) : pclk_rate / (best_pres + 1));
return 0;
}
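/*
 * Worked example with illustrative rates: with pclk = 200 MHz and
 * clk_round_rate(gclk, 5 MHz) returning 10 MHz, pclk/gclk = 20 >= 3, so GCLK
 * is considered; mchp_pit64b_pres_compute() picks pres = 1 (10 MHz / 2 =
 * 5 MHz) and best_diff = 0, so the timer runs from GCLK with SGCLK set and a
 * divide-by-2 prescaler. If GCLK could not get close enough, the same search
 * is repeated on PCLK and whichever clock lands nearer the requested rate
 * wins.
 */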
static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
u32 clk_rate)
{
struct mchp_pit64b_clksrc *cs;
int ret;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
mchp_pit64b_resume(timer);
mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
mchp_pit64b_cs_base = timer->base;
cs->timer.base = timer->base;
cs->timer.pclk = timer->pclk;
cs->timer.gclk = timer->gclk;
cs->timer.mode = timer->mode;
cs->clksrc.name = MCHP_PIT64B_NAME;
cs->clksrc.mask = CLOCKSOURCE_MASK(64);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
cs->clksrc.rating = 210;
cs->clksrc.read = mchp_pit64b_clksrc_read;
cs->clksrc.suspend = mchp_pit64b_clksrc_suspend;
cs->clksrc.resume = mchp_pit64b_clksrc_resume;
ret = clocksource_register_hz(&cs->clksrc, clk_rate);
if (ret) {
pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
/* Stop timer. */
mchp_pit64b_suspend(timer);
kfree(cs);
return ret;
}
sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate);
mchp_pit64b_dt.read_current_timer = mchp_pit64b_dt_read;
mchp_pit64b_dt.freq = clk_rate;
register_current_timer_delay(&mchp_pit64b_dt);
return 0;
}
static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
u32 clk_rate, u32 irq)
{
struct mchp_pit64b_clkevt *ce;
int ret;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce)
return -ENOMEM;
mchp_pit64b_ce_cycles = DIV_ROUND_CLOSEST(clk_rate, HZ);
ce->timer.base = timer->base;
ce->timer.pclk = timer->pclk;
ce->timer.gclk = timer->gclk;
ce->timer.mode = timer->mode;
ce->clkevt.name = MCHP_PIT64B_NAME;
ce->clkevt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
ce->clkevt.rating = 150;
ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown;
ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic;
ce->clkevt.set_state_oneshot = mchp_pit64b_clkevt_set_oneshot;
ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event;
ce->clkevt.cpumask = cpumask_of(0);
ce->clkevt.irq = irq;
ret = request_irq(irq, mchp_pit64b_interrupt, IRQF_TIMER,
"pit64b_tick", ce);
if (ret) {
pr_debug("clkevt: Failed to setup PIT64B IRQ\n");
kfree(ce);
return ret;
}
clockevents_config_and_register(&ce->clkevt, clk_rate, 1, ULONG_MAX);
return 0;
}
static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
bool clkevt)
{
struct mchp_pit64b_timer timer;
unsigned long clk_rate;
u32 irq = 0;
int ret;
/* Parse DT node. */
timer.pclk = of_clk_get_by_name(node, "pclk");
if (IS_ERR(timer.pclk))
return PTR_ERR(timer.pclk);
timer.gclk = of_clk_get_by_name(node, "gclk");
if (IS_ERR(timer.gclk))
return PTR_ERR(timer.gclk);
timer.base = of_iomap(node, 0);
if (!timer.base)
return -ENXIO;
if (clkevt) {
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
ret = -ENODEV;
goto io_unmap;
}
}
	/* Initialize mode (prescaler + SGCLK bit). To be used at runtime. */
ret = mchp_pit64b_init_mode(&timer, MCHP_PIT64B_DEF_FREQ);
if (ret)
goto irq_unmap;
if (timer.mode & MCHP_PIT64B_MR_SGCLK)
clk_rate = clk_get_rate(timer.gclk);
else
clk_rate = clk_get_rate(timer.pclk);
clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1);
if (clkevt)
ret = mchp_pit64b_init_clkevt(&timer, clk_rate, irq);
else
ret = mchp_pit64b_init_clksrc(&timer, clk_rate);
if (ret)
goto irq_unmap;
return 0;
irq_unmap:
irq_dispose_mapping(irq);
io_unmap:
iounmap(timer.base);
return ret;
}
static int __init mchp_pit64b_dt_init(struct device_node *node)
{
static int inits;
switch (inits++) {
case 0:
/* 1st request, register clockevent. */
return mchp_pit64b_dt_init_timer(node, true);
case 1:
/* 2nd request, register clocksource. */
return mchp_pit64b_dt_init_timer(node, false);
}
/* The rest, don't care. */
return -EINVAL;
}
TIMER_OF_DECLARE(mchp_pit64b, "microchip,sam9x60-pit64b", mchp_pit64b_dt_init);
|
linux-master
|
drivers/clocksource/timer-microchip-pit64b.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Mediatek SoCs General-Purpose Timer handling.
*
* Copyright (C) 2014 Matthias Brugger
*
* Matthias Brugger <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include "timer-of.h"
#define TIMER_CLK_EVT (1)
#define TIMER_CLK_SRC (2)
#define TIMER_SYNC_TICKS (3)
/* gpt */
#define GPT_IRQ_EN_REG 0x00
#define GPT_IRQ_ENABLE(val) BIT((val) - 1)
#define GPT_IRQ_ACK_REG 0x08
#define GPT_IRQ_ACK(val) BIT((val) - 1)
#define GPT_CTRL_REG(val) (0x10 * (val))
#define GPT_CTRL_OP(val) (((val) & 0x3) << 4)
#define GPT_CTRL_OP_ONESHOT (0)
#define GPT_CTRL_OP_REPEAT (1)
#define GPT_CTRL_OP_FREERUN (3)
#define GPT_CTRL_CLEAR (2)
#define GPT_CTRL_ENABLE (1)
#define GPT_CTRL_DISABLE (0)
#define GPT_CLK_REG(val) (0x04 + (0x10 * (val)))
#define GPT_CLK_SRC(val) (((val) & 0x1) << 4)
#define GPT_CLK_SRC_SYS13M (0)
#define GPT_CLK_SRC_RTC32K (1)
#define GPT_CLK_DIV1 (0x0)
#define GPT_CLK_DIV2 (0x1)
#define GPT_CNT_REG(val) (0x08 + (0x10 * (val)))
#define GPT_CMP_REG(val) (0x0C + (0x10 * (val)))
/* system timer */
#define SYST_BASE (0x40)
#define SYST_CON (SYST_BASE + 0x0)
#define SYST_VAL (SYST_BASE + 0x4)
#define SYST_CON_REG(to) (timer_of_base(to) + SYST_CON)
#define SYST_VAL_REG(to) (timer_of_base(to) + SYST_VAL)
/*
 * SYST_CON_EN: Clock enable. Shall be set to:
 * - Start the timer countdown.
 * - Allow timeout ticks to be updated.
 * - Allow changing interrupt status, e.g. clearing a pending irq.
*
* SYST_CON_IRQ_EN: Set to enable interrupt.
*
* SYST_CON_IRQ_CLR: Set to clear interrupt.
*/
#define SYST_CON_EN BIT(0)
#define SYST_CON_IRQ_EN BIT(1)
#define SYST_CON_IRQ_CLR BIT(4)
static void __iomem *gpt_sched_reg __read_mostly;
static void mtk_syst_ack_irq(struct timer_of *to)
{
/* Clear and disable interrupt */
writel(SYST_CON_EN, SYST_CON_REG(to));
writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
}
static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
{
struct clock_event_device *clkevt = dev_id;
struct timer_of *to = to_timer_of(clkevt);
mtk_syst_ack_irq(to);
clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
static int mtk_syst_clkevt_next_event(unsigned long ticks,
struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
/* Enable clock to allow timeout tick update later */
writel(SYST_CON_EN, SYST_CON_REG(to));
/*
* Write new timeout ticks. Timer shall start countdown
* after timeout ticks are updated.
*/
writel(ticks, SYST_VAL_REG(to));
/* Enable interrupt */
writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
return 0;
}
static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
{
/* Clear any irq */
mtk_syst_ack_irq(to_timer_of(clkevt));
/* Disable timer */
writel(0, SYST_CON_REG(to_timer_of(clkevt)));
return 0;
}
static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
{
return mtk_syst_clkevt_shutdown(clkevt);
}
static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
{
return 0;
}
static u64 notrace mtk_gpt_read_sched_clock(void)
{
return readl_relaxed(gpt_sched_reg);
}
static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
{
u32 val;
val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
GPT_CTRL_REG(timer));
}
static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
unsigned long delay, u8 timer)
{
writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
}
static void mtk_gpt_clkevt_time_start(struct timer_of *to,
bool periodic, u8 timer)
{
u32 val;
/* Acknowledge interrupt */
writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
/* Clear 2 bit timer operation mode field */
val &= ~GPT_CTRL_OP(0x3);
if (periodic)
val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
else
val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);
writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
timer_of_base(to) + GPT_CTRL_REG(timer));
}
static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
{
mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);
return 0;
}
static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
return 0;
}
static int mtk_gpt_clkevt_next_event(unsigned long event,
struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
return 0;
}
static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
{
struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
struct timer_of *to = to_timer_of(clkevt);
/* Acknowledge timer0 irq */
writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
static void
__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
{
writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
timer_of_base(to) + GPT_CTRL_REG(timer));
writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
timer_of_base(to) + GPT_CLK_REG(timer));
writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
timer_of_base(to) + GPT_CTRL_REG(timer));
}
static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
{
u32 val;
/* Disable all interrupts */
writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
/* Acknowledge all spurious pending interrupts */
writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
writel(val | GPT_IRQ_ENABLE(timer),
timer_of_base(to) + GPT_IRQ_EN_REG);
}
static void mtk_gpt_resume(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
mtk_gpt_enable_irq(to, TIMER_CLK_EVT);
}
static void mtk_gpt_suspend(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
/* Disable all interrupts */
writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
/*
	 * This is called with interrupts disabled, so we need to ack any
	 * pending interrupt here; otherwise, for example, ATF will prevent
	 * the suspend from completing.
*/
writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
}
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "mtk-clkevt",
.rating = 300,
.cpumask = cpu_possible_mask,
},
.of_irq = {
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
};
static int __init mtk_syst_init(struct device_node *node)
{
int ret;
to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
to.clkevt.tick_resume = mtk_syst_clkevt_resume;
to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
to.of_irq.handler = mtk_syst_handler;
ret = timer_of_init(node, &to);
if (ret)
return ret;
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
TIMER_SYNC_TICKS, 0xffffffff);
return 0;
}
static int __init mtk_gpt_init(struct device_node *node)
{
int ret;
to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
to.clkevt.suspend = mtk_gpt_suspend;
to.clkevt.resume = mtk_gpt_resume;
to.of_irq.handler = mtk_gpt_interrupt;
ret = timer_of_init(node, &to);
if (ret)
return ret;
/* Configure clock source */
mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
node->name, timer_of_rate(&to), 300, 32,
clocksource_mmio_readl_up);
gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
/* Configure clock event */
mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
TIMER_SYNC_TICKS, 0xffffffff);
mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
return 0;
}
TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
|
linux-master
|
drivers/clocksource/timer-mediatek.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner A1X SoCs timer handling.
*
* Copyright (C) 2012 Maxime Ripard
*
* Maxime Ripard <[email protected]>
*
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
* Benn Huang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include "timer-of.h"
#define TIMER_IRQ_EN_REG 0x00
#define TIMER_IRQ_EN(val) BIT(val)
#define TIMER_IRQ_ST_REG 0x04
#define TIMER_IRQ_CLEAR(val) BIT(val)
#define TIMER_CTL_REG(val) (0x10 * val + 0x10)
#define TIMER_CTL_ENABLE BIT(0)
#define TIMER_CTL_RELOAD BIT(1)
#define TIMER_CTL_CLK_SRC(val) (((val) & 0x3) << 2)
#define TIMER_CTL_CLK_SRC_OSC24M (1)
#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4)
#define TIMER_CTL_ONESHOT BIT(7)
#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14)
#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18)
#define TIMER_SYNC_TICKS 3
/*
 * When we disable a timer, we need to wait at least 2 cycles of the timer
 * source clock. For that we use the clocksource timer, which is already set
 * up, runs at the same frequency as the other timers, and will never be
 * disabled.
*/
static void sun4i_clkevt_sync(void __iomem *base)
{
u32 old = readl(base + TIMER_CNTVAL_REG(1));
while ((old - readl(base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS)
cpu_relax();
}
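/*
 * Design note: the sync above polls channel 1, the free-running clocksource
 * channel, which counts down from the same 24 MHz source as channel 0 and is
 * never disabled. Waiting until it has advanced by TIMER_SYNC_TICKS ticks
 * guarantees the required two source-clock cycles have elapsed without
 * touching the channel that is being reprogrammed.
 */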
static void sun4i_clkevt_time_stop(void __iomem *base, u8 timer)
{
u32 val = readl(base + TIMER_CTL_REG(timer));
writel(val & ~TIMER_CTL_ENABLE, base + TIMER_CTL_REG(timer));
sun4i_clkevt_sync(base);
}
static void sun4i_clkevt_time_setup(void __iomem *base, u8 timer,
unsigned long delay)
{
writel(delay, base + TIMER_INTVAL_REG(timer));
}
static void sun4i_clkevt_time_start(void __iomem *base, u8 timer,
bool periodic)
{
u32 val = readl(base + TIMER_CTL_REG(timer));
if (periodic)
val &= ~TIMER_CTL_ONESHOT;
else
val |= TIMER_CTL_ONESHOT;
writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(timer));
}
static int sun4i_clkevt_shutdown(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
sun4i_clkevt_time_stop(timer_of_base(to), 0);
return 0;
}
static int sun4i_clkevt_set_oneshot(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
sun4i_clkevt_time_stop(timer_of_base(to), 0);
sun4i_clkevt_time_start(timer_of_base(to), 0, false);
return 0;
}
static int sun4i_clkevt_set_periodic(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
sun4i_clkevt_time_stop(timer_of_base(to), 0);
sun4i_clkevt_time_setup(timer_of_base(to), 0, timer_of_period(to));
sun4i_clkevt_time_start(timer_of_base(to), 0, true);
return 0;
}
static int sun4i_clkevt_next_event(unsigned long evt,
struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
sun4i_clkevt_time_stop(timer_of_base(to), 0);
sun4i_clkevt_time_setup(timer_of_base(to), 0, evt - TIMER_SYNC_TICKS);
sun4i_clkevt_time_start(timer_of_base(to), 0, false);
return 0;
}
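/*
 * Note: the interval is programmed as (evt - TIMER_SYNC_TICKS). The
 * clockevent is registered with TIMER_SYNC_TICKS as its minimum delta, and
 * presumably roughly that many ticks are consumed by the stop/sync sequence
 * above, so subtracting them keeps the effective expiry close to what was
 * requested.
 */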
static void sun4i_timer_clear_interrupt(void __iomem *base)
{
writel(TIMER_IRQ_CLEAR(0), base + TIMER_IRQ_ST_REG);
}
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
struct timer_of *to = to_timer_of(evt);
sun4i_timer_clear_interrupt(timer_of_base(to));
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
.clkevt = {
.name = "sun4i_tick",
.rating = 350,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = sun4i_clkevt_shutdown,
.set_state_periodic = sun4i_clkevt_set_periodic,
.set_state_oneshot = sun4i_clkevt_set_oneshot,
.tick_resume = sun4i_clkevt_shutdown,
.set_next_event = sun4i_clkevt_next_event,
.cpumask = cpu_possible_mask,
},
.of_irq = {
.handler = sun4i_timer_interrupt,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
};
static u64 notrace sun4i_timer_sched_read(void)
{
return ~readl(timer_of_base(&to) + TIMER_CNTVAL_REG(1));
}
static int __init sun4i_timer_init(struct device_node *node)
{
int ret;
u32 val;
ret = timer_of_init(node, &to);
if (ret)
return ret;
writel(~0, timer_of_base(&to) + TIMER_INTVAL_REG(1));
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD |
TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_of_base(&to) + TIMER_CTL_REG(1));
/*
* sched_clock_register does not have priorities, and on sun6i and
* later there is a better sched_clock registered by arm_arch_timer.c
*/
if (of_machine_is_compatible("allwinner,sun4i-a10") ||
of_machine_is_compatible("allwinner,sun5i-a13") ||
of_machine_is_compatible("allwinner,sun5i-a10s") ||
of_machine_is_compatible("allwinner,suniv-f1c100s"))
sched_clock_register(sun4i_timer_sched_read, 32,
timer_of_rate(&to));
ret = clocksource_mmio_init(timer_of_base(&to) + TIMER_CNTVAL_REG(1),
node->name, timer_of_rate(&to), 350, 32,
clocksource_mmio_readl_down);
if (ret) {
pr_err("Failed to register clocksource\n");
return ret;
}
writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_of_base(&to) + TIMER_CTL_REG(0));
/* Make sure timer is stopped before playing with interrupts */
sun4i_clkevt_time_stop(timer_of_base(&to), 0);
/* clear timer0 interrupt */
sun4i_timer_clear_interrupt(timer_of_base(&to));
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
TIMER_SYNC_TICKS, 0xffffffff);
/* Enable timer0 interrupt */
val = readl(timer_of_base(&to) + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), timer_of_base(&to) + TIMER_IRQ_EN_REG);
return ret;
}
TIMER_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
TIMER_OF_DECLARE(sun8i_a23, "allwinner,sun8i-a23-timer",
sun4i_timer_init);
TIMER_OF_DECLARE(sun8i_v3s, "allwinner,sun8i-v3s-timer",
sun4i_timer_init);
TIMER_OF_DECLARE(suniv, "allwinner,suniv-f1c100s-timer",
sun4i_timer_init);
|
linux-master
|
drivers/clocksource/timer-sun4i.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/clocksource/zevio-timer.c
*
* Copyright (C) 2013 Daniel Tang <[email protected]>
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#define IO_CURRENT_VAL 0x00
#define IO_DIVIDER 0x04
#define IO_CONTROL 0x08
#define IO_TIMER1 0x00
#define IO_TIMER2 0x0C
#define IO_MATCH_BEGIN 0x18
#define IO_MATCH(x) (IO_MATCH_BEGIN + ((x) << 2))
#define IO_INTR_STS 0x00
#define IO_INTR_ACK 0x00
#define IO_INTR_MSK 0x04
#define CNTL_STOP_TIMER (1 << 4)
#define CNTL_RUN_TIMER (0 << 4)
#define CNTL_INC (1 << 3)
#define CNTL_DEC (0 << 3)
#define CNTL_TOZERO 0
#define CNTL_MATCH(x) ((x) + 1)
#define CNTL_FOREVER 7
/* There are 6 match registers but we only use one. */
#define TIMER_MATCH 0
#define TIMER_INTR_MSK (1 << (TIMER_MATCH))
#define TIMER_INTR_ALL 0x3F
struct zevio_timer {
void __iomem *base;
void __iomem *timer1, *timer2;
void __iomem *interrupt_regs;
struct clk *clk;
struct clock_event_device clkevt;
char clocksource_name[64];
char clockevent_name[64];
};
static int zevio_timer_set_event(unsigned long delta,
struct clock_event_device *dev)
{
struct zevio_timer *timer = container_of(dev, struct zevio_timer,
clkevt);
writel(delta, timer->timer1 + IO_CURRENT_VAL);
writel(CNTL_RUN_TIMER | CNTL_DEC | CNTL_MATCH(TIMER_MATCH),
timer->timer1 + IO_CONTROL);
return 0;
}
static int zevio_timer_shutdown(struct clock_event_device *dev)
{
struct zevio_timer *timer = container_of(dev, struct zevio_timer,
clkevt);
/* Disable timer interrupts */
writel(0, timer->interrupt_regs + IO_INTR_MSK);
writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
/* Stop timer */
writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
return 0;
}
static int zevio_timer_set_oneshot(struct clock_event_device *dev)
{
struct zevio_timer *timer = container_of(dev, struct zevio_timer,
clkevt);
/* Enable timer interrupts */
writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK);
writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
return 0;
}
static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id)
{
struct zevio_timer *timer = dev_id;
u32 intr;
intr = readl(timer->interrupt_regs + IO_INTR_ACK);
if (!(intr & TIMER_INTR_MSK))
return IRQ_NONE;
writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_ACK);
writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
if (timer->clkevt.event_handler)
timer->clkevt.event_handler(&timer->clkevt);
return IRQ_HANDLED;
}
static int __init zevio_timer_add(struct device_node *node)
{
struct zevio_timer *timer;
struct resource res;
int irqnr, ret;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->base = of_iomap(node, 0);
if (!timer->base) {
ret = -EINVAL;
goto error_free;
}
timer->timer1 = timer->base + IO_TIMER1;
timer->timer2 = timer->base + IO_TIMER2;
timer->clk = of_clk_get(node, 0);
if (IS_ERR(timer->clk)) {
ret = PTR_ERR(timer->clk);
pr_err("Timer clock not found! (error %d)\n", ret);
goto error_unmap;
}
timer->interrupt_regs = of_iomap(node, 1);
irqnr = irq_of_parse_and_map(node, 0);
of_address_to_resource(node, 0, &res);
scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name),
"%llx.%pOFn_clocksource",
(unsigned long long)res.start, node);
scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name),
"%llx.%pOFn_clockevent",
(unsigned long long)res.start, node);
if (timer->interrupt_regs && irqnr) {
timer->clkevt.name = timer->clockevent_name;
timer->clkevt.set_next_event = zevio_timer_set_event;
timer->clkevt.set_state_shutdown = zevio_timer_shutdown;
timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
timer->clkevt.tick_resume = zevio_timer_set_oneshot;
timer->clkevt.rating = 200;
timer->clkevt.cpumask = cpu_possible_mask;
timer->clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->clkevt.irq = irqnr;
writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
writel(0, timer->timer1 + IO_DIVIDER);
/* Start with timer interrupts disabled */
writel(0, timer->interrupt_regs + IO_INTR_MSK);
writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
/* Interrupt to occur when timer value matches 0 */
writel(0, timer->base + IO_MATCH(TIMER_MATCH));
if (request_irq(irqnr, zevio_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL,
timer->clockevent_name, timer)) {
pr_err("%s: request_irq() failed\n",
timer->clockevent_name);
}
clockevents_config_and_register(&timer->clkevt,
clk_get_rate(timer->clk), 0x0001, 0xffff);
pr_info("Added %s as clockevent\n", timer->clockevent_name);
}
writel(CNTL_STOP_TIMER, timer->timer2 + IO_CONTROL);
writel(0, timer->timer2 + IO_CURRENT_VAL);
writel(0, timer->timer2 + IO_DIVIDER);
writel(CNTL_RUN_TIMER | CNTL_FOREVER | CNTL_INC,
timer->timer2 + IO_CONTROL);
clocksource_mmio_init(timer->timer2 + IO_CURRENT_VAL,
timer->clocksource_name,
clk_get_rate(timer->clk),
200, 16,
clocksource_mmio_readw_up);
pr_info("Added %s as clocksource\n", timer->clocksource_name);
return 0;
error_unmap:
iounmap(timer->base);
error_free:
kfree(timer);
return ret;
}
static int __init zevio_timer_init(struct device_node *node)
{
return zevio_timer_add(node);
}
TIMER_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
|
linux-master
|
drivers/clocksource/timer-zevio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
/* ARC700 has two independent 32-bit programmable timers: TIMER0 and TIMER1.
 * Each can be programmed to go from @count to @limit and optionally interrupt.
 * We've designated TIMER0 for clockevents and TIMER1 for the clocksource.
 *
 * ARCv2 based HS38 cores have an RTC (in-core) and GFRC (inside ARConnect/MCIP)
 * which are suitable for UP and SMP based clocksources respectively.
*/
#include <linux/interrupt.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>
static unsigned long arc_timer_freq;
static int noinline arc_get_timer_clk(struct device_node *node)
{
struct clk *clk;
int ret;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("timer missing clk\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Couldn't enable parent clk\n");
return ret;
}
arc_timer_freq = clk_get_rate(clk);
return 0;
}
/********** Clock Source Device *********/
#ifdef CONFIG_ARC_TIMERS_64BIT
static u64 arc_read_gfrc(struct clocksource *cs)
{
unsigned long flags;
u32 l, h;
/*
* From a programming model pov, there seems to be just one instance of
* MCIP_CMD/MCIP_READBACK however micro-architecturally there's
	 * an instance PER ARC CORE (not per cluster), and there is dedicated
	 * hardware decode logic (per core) inside ARConnect to handle
* simultaneous read/write accesses from cores via those two registers.
* So several concurrent commands to ARConnect are OK if they are
* trying to access two different sub-components (like GFRC,
* inter-core interrupt, etc...). HW also supports simultaneously
* accessing GFRC by multiple cores.
* That's why it is safe to disable hard interrupts on the local CPU
* before access to GFRC instead of taking global MCIP spinlock
* defined in arch/arc/kernel/mcip.c
*/
local_irq_save(flags);
__mcip_cmd(CMD_GFRC_READ_LO, 0);
l = read_aux_reg(ARC_REG_MCIP_READBACK);
__mcip_cmd(CMD_GFRC_READ_HI, 0);
h = read_aux_reg(ARC_REG_MCIP_READBACK);
local_irq_restore(flags);
return (((u64)h) << 32) | l;
}
static notrace u64 arc_gfrc_clock_read(void)
{
return arc_read_gfrc(NULL);
}
static struct clocksource arc_counter_gfrc = {
.name = "ARConnect GFRC",
.rating = 400,
.read = arc_read_gfrc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init arc_cs_setup_gfrc(struct device_node *node)
{
struct mcip_bcr mp;
int ret;
READ_BCR(ARC_REG_MCIP_BCR, mp);
if (!mp.gfrc) {
pr_warn("Global-64-bit-Ctr clocksource not detected\n");
return -ENXIO;
}
ret = arc_get_timer_clk(node);
if (ret)
return ret;
sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
#define AUX_RTC_CTRL 0x103
#define AUX_RTC_LOW 0x104
#define AUX_RTC_HIGH 0x105
static u64 arc_read_rtc(struct clocksource *cs)
{
unsigned long status;
u32 l, h;
/*
	 * The hardware has an internal state machine which tracks the readout
	 * of low/high and updates the CTRL status bit if
	 * - an interrupt/exception was taken between the two reads, or
	 * - high incremented after low had been read
*/
do {
l = read_aux_reg(AUX_RTC_LOW);
h = read_aux_reg(AUX_RTC_HIGH);
status = read_aux_reg(AUX_RTC_CTRL);
} while (!(status & BIT(31)));
return (((u64)h) << 32) | l;
}
static notrace u64 arc_rtc_clock_read(void)
{
return arc_read_rtc(NULL);
}
static struct clocksource arc_counter_rtc = {
.name = "ARCv2 RTC",
.rating = 350,
.read = arc_read_rtc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init arc_cs_setup_rtc(struct device_node *node)
{
struct bcr_timer timer;
int ret;
READ_BCR(ARC_REG_TIMERS_BCR, timer);
if (!timer.rtc) {
pr_warn("Local-64-bit-Ctr clocksource not detected\n");
return -ENXIO;
}
/* Local to CPU hence not usable in SMP */
if (IS_ENABLED(CONFIG_SMP)) {
pr_warn("Local-64-bit-Ctr not usable in SMP\n");
return -EINVAL;
}
ret = arc_get_timer_clk(node);
if (ret)
return ret;
write_aux_reg(AUX_RTC_CTRL, 1);
sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
#endif
/*
 * 32-bit TIMER1 keeps counting monotonically and wraps around.
*/
static u64 arc_read_timer1(struct clocksource *cs)
{
return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}
static notrace u64 arc_timer1_clock_read(void)
{
return arc_read_timer1(NULL);
}
static struct clocksource arc_counter_timer1 = {
.name = "ARC Timer1",
.rating = 300,
.read = arc_read_timer1,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init arc_cs_setup_timer1(struct device_node *node)
{
int ret;
/* Local to CPU hence not usable in SMP */
if (IS_ENABLED(CONFIG_SMP))
return -EINVAL;
ret = arc_get_timer_clk(node);
if (ret)
return ret;
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);
sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
/********** Clock Event Device *********/
static int arc_timer_irq;
/*
* Arm the timer to interrupt after @cycles
* The distinction for oneshot/periodic is done in arc_event_timer_ack() below
*/
static void arc_timer_event_setup(unsigned int cycles)
{
write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
}
static int arc_clkevent_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
arc_timer_event_setup(delta);
return 0;
}
static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
/*
	 * At X Hz, 1 sec corresponds to X cycles, so one tick period
	 * (1/HZ sec) corresponds to X / HZ cycles.
*/
arc_timer_event_setup(arc_timer_freq / HZ);
return 0;
}
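/*
 * Example: with arc_timer_freq = 100 MHz and HZ = 100, the periodic limit is
 * 100000000 / 100 = 1000000 cycles, i.e. one interrupt every 10 ms.
 */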
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
.name = "ARC Timer0",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.set_next_event = arc_clkevent_set_next_event,
.set_state_periodic = arc_clkevent_set_periodic,
};
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
/*
* Note that generic IRQ core could have passed @evt for @dev_id if
* irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
*/
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int irq_reenable = clockevent_state_periodic(evt);
/*
* 1. ACK the interrupt
	 *    - For ARC700, any write to the CTRL reg ACKs it, so just rewrite
	 *      the Count-when-[N]ot-[H]alted (NH) bit.
* - For HS3x, it is a bit subtle. On taken count-down interrupt,
* IP bit [3] is set, which needs to be cleared for ACK'ing.
* The write below can only update the other two bits, hence
* explicitly clears IP bit
* 2. Re-arm interrupt if periodic by writing to IE bit [0]
*/
write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int arc_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
evt->cpumask = cpumask_of(smp_processor_id());
clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
enable_percpu_irq(arc_timer_irq, 0);
return 0;
}
static int arc_timer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(arc_timer_irq);
return 0;
}
/*
* clockevent setup for boot CPU
*/
static int __init arc_clockevent_setup(struct device_node *node)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int ret;
arc_timer_irq = irq_of_parse_and_map(node, 0);
if (arc_timer_irq <= 0) {
pr_err("clockevent: missing irq\n");
return -EINVAL;
}
ret = arc_get_timer_clk(node);
if (ret)
return ret;
	/* Needs a priori irq_set_percpu_devid() done in intc map function */
ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
"Timer0 (per-cpu-tick)", evt);
if (ret) {
pr_err("clockevent: unable to request irq\n");
return ret;
}
ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
"clockevents/arc/timer:starting",
arc_timer_starting_cpu,
arc_timer_dying_cpu);
if (ret) {
pr_err("Failed to setup hotplug state\n");
return ret;
}
return 0;
}
static int __init arc_of_timer_init(struct device_node *np)
{
static int init_count = 0;
int ret;
if (!init_count) {
init_count = 1;
ret = arc_clockevent_setup(np);
} else {
ret = arc_cs_setup_timer1(np);
}
return ret;
}
TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
|
linux-master
|
drivers/clocksource/arc_timer.c
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>
/*
* We're configured to use a specific TC block, one that's not hooked
* up to external hardware, to provide a time solution:
*
* - Two channels combine to create a free-running 32 bit counter
* with a base rate of 5+ MHz, packaged as a clocksource (with
* resolution better than 200 nsec).
 * - Some chips support a 32 bit counter. A single channel is used for
 *   this 32 bit free-running counter; the second channel is not used.
 *
 * - The third channel may be used to provide a clockevent source, used in
 *   either periodic or oneshot mode. With a 16-bit counter it runs at 32 KiHz
 *   and can handle delays of up to two seconds. With 32-bit counters, it runs
 *   at the same rate as the clocksource.
*
* REVISIT behavior during system suspend states... we should disable
* all clocks and save the power. Easily done for clockevent devices,
* but clocksources won't necessarily get the needed notifications.
* For deeper system sleep states, this will be mandatory...
*/
static void __iomem *tcaddr;
static struct
{
u32 cmr;
u32 imr;
u32 rc;
bool clken;
} tcb_cache[3];
static u32 bmr_cache;
static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };
static u64 tc_get_cycles(struct clocksource *cs)
{
unsigned long flags;
u32 lower, upper;
raw_local_irq_save(flags);
do {
upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
raw_local_irq_restore(flags);
return (upper << 16) | lower;
}
static u64 tc_get_cycles32(struct clocksource *cs)
{
return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}
static void tc_clksrc_suspend(struct clocksource *cs)
{
int i;
for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
ATMEL_TC_CLKSTA);
}
bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}
static void tc_clksrc_resume(struct clocksource *cs)
{
int i;
for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
/* Restore registers for the channel, RA and RB are not used */
writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
writel(0, tcaddr + ATMEL_TC_REG(i, RA));
writel(0, tcaddr + ATMEL_TC_REG(i, RB));
/* Disable all the interrupts */
writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
/* Reenable interrupts that were enabled before suspending */
writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
/* Start the clock if it was used */
if (tcb_cache[i].clken)
writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
}
/* Dual channel, chain channels */
writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
	/* Finally, trigger all the channels */
writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
static struct clocksource clksrc = {
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend = tc_clksrc_suspend,
.resume = tc_clksrc_resume,
};
static u64 notrace tc_sched_clock_read(void)
{
return tc_get_cycles(&clksrc);
}
static u64 notrace tc_sched_clock_read32(void)
{
return tc_get_cycles32(&clksrc);
}
static struct delay_timer tc_delay_timer;
static unsigned long tc_delay_timer_read(void)
{
return tc_get_cycles(&clksrc);
}
static unsigned long notrace tc_delay_timer_read32(void)
{
return tc_get_cycles32(&clksrc);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
u32 rate;
void __iomem *regs;
};
static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
static u32 timer_clock;
static int tc_shutdown(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
void __iomem *regs = tcd->regs;
writel(0xff, regs + ATMEL_TC_REG(2, IDR));
writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
if (!clockevent_state_detached(d))
clk_disable(tcd->clk);
return 0;
}
static int tc_set_oneshot(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
void __iomem *regs = tcd->regs;
if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
tc_shutdown(d);
clk_enable(tcd->clk);
/* count up to RC, then irq and stop */
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
/* set_next_event() configures and starts the timer */
return 0;
}
static int tc_set_periodic(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
void __iomem *regs = tcd->regs;
if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
tc_shutdown(d);
/* By not making the gentime core emulate periodic mode on top
* of oneshot, we get lower overhead and improved accuracy.
*/
clk_enable(tcd->clk);
/* count up to RC, then irq and restart */
writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
/* Enable clock and interrupts on RC compare */
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
/* go go gadget! */
writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
ATMEL_TC_REG(2, CCR));
return 0;
}
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
/* go go gadget! */
writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
tcaddr + ATMEL_TC_REG(2, CCR));
return 0;
}
static struct tc_clkevt_device clkevt = {
.clkevt = {
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
.set_state_shutdown = tc_shutdown,
.set_state_periodic = tc_set_periodic,
.set_state_oneshot = tc_set_oneshot,
},
};
static irqreturn_t ch2_irq(int irq, void *handle)
{
struct tc_clkevt_device *dev = handle;
unsigned int sr;
sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
if (sr & ATMEL_TC_CPCS) {
dev->clkevt.event_handler(&dev->clkevt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
int bits = tc->tcb_config->counter_width;
/* try to enable t2 clk to avoid future errors in mode change */
ret = clk_prepare_enable(t2_clk);
if (ret)
return ret;
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
if (bits == 32) {
timer_clock = divisor_idx;
clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
} else {
ret = clk_prepare_enable(tc->slow_clk);
if (ret) {
clk_disable_unprepare(t2_clk);
return ret;
}
clkevt.rate = clk_get_rate(tc->slow_clk);
timer_clock = ATMEL_TC_TIMER_CLOCK5;
}
clk_disable(t2_clk);
clkevt.clkevt.cpumask = cpumask_of(0);
ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
if (ret) {
clk_unprepare(t2_clk);
if (bits != 32)
clk_disable_unprepare(tc->slow_clk);
return ret;
}
clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);
return ret;
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
/* NOTHING */
return 0;
}
#endif
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
/* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
writel(mck_divisor_idx /* likely divide-by-8 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP /* free-run */
| ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
| ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
tcaddr + ATMEL_TC_REG(0, CMR));
writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
/* channel 1: waveform mode, input TIOA0 */
writel(ATMEL_TC_XC1 /* input: TIOA0 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP, /* free-run */
tcaddr + ATMEL_TC_REG(1, CMR));
writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
/* chain channel 0 to channel 1 */
writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
/* then reset all the timers */
writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
/* channel 0: waveform mode, input mclk/8 */
writel(mck_divisor_idx /* likely divide-by-8 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP, /* free-run */
tcaddr + ATMEL_TC_REG(0, CMR));
writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
/* then reset all the timers */
writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
static struct atmel_tcb_config tcb_rm9200_config = {
.counter_width = 16,
};
static struct atmel_tcb_config tcb_sam9x5_config = {
.counter_width = 32,
};
static struct atmel_tcb_config tcb_sama5d2_config = {
.counter_width = 32,
.has_gclk = 1,
};
static const struct of_device_id atmel_tcb_of_match[] = {
{ .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
{ .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
{ .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
{ /* sentinel */ }
};
static int __init tcb_clksrc_init(struct device_node *node)
{
struct atmel_tc tc;
struct clk *t0_clk;
const struct of_device_id *match;
u64 (*tc_sched_clock)(void);
u32 rate, divided_rate = 0;
int best_divisor_idx = -1;
int bits;
int i;
int ret;
/* Protect against multiple calls */
if (tcaddr)
return 0;
tc.regs = of_iomap(node->parent, 0);
if (!tc.regs)
return -ENXIO;
t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
if (IS_ERR(t0_clk))
return PTR_ERR(t0_clk);
tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
if (IS_ERR(tc.slow_clk))
return PTR_ERR(tc.slow_clk);
tc.clk[0] = t0_clk;
tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
if (IS_ERR(tc.clk[1]))
tc.clk[1] = t0_clk;
tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
if (IS_ERR(tc.clk[2]))
tc.clk[2] = t0_clk;
tc.irq[2] = of_irq_get(node->parent, 2);
if (tc.irq[2] <= 0) {
tc.irq[2] = of_irq_get(node->parent, 0);
if (tc.irq[2] <= 0)
return -EINVAL;
}
match = of_match_node(atmel_tcb_of_match, node->parent);
if (!match)
return -ENODEV;
tc.tcb_config = match->data;
bits = tc.tcb_config->counter_width;
for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
ret = clk_prepare_enable(t0_clk);
if (ret) {
pr_debug("can't enable T0 clk\n");
return ret;
}
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
i = 0;
if (tc.tcb_config->has_gclk)
i = 1;
for (; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
unsigned divisor = atmel_tcb_divisors[i];
unsigned tmp;
tmp = rate / divisor;
pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
if ((best_divisor_idx >= 0) && (tmp < 5 * 1000 * 1000))
break;
divided_rate = tmp;
best_divisor_idx = i;
}
clksrc.name = kbasename(node->parent->full_name);
clkevt.clkevt.name = kbasename(node->parent->full_name);
pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
((divided_rate % 1000000) + 500) / 1000);
tcaddr = tc.regs;
if (bits == 32) {
/* use appropriate function to read 32 bit counter */
clksrc.read = tc_get_cycles32;
/* setup only channel 0 */
tcb_setup_single_chan(&tc, best_divisor_idx);
tc_sched_clock = tc_sched_clock_read32;
tc_delay_timer.read_current_timer = tc_delay_timer_read32;
} else {
/* we have three clocks no matter what the
* underlying platform supports.
*/
ret = clk_prepare_enable(tc.clk[1]);
if (ret) {
pr_debug("can't enable T1 clk\n");
goto err_disable_t0;
}
/* setup both channel 0 & 1 */
tcb_setup_dual_chan(&tc, best_divisor_idx);
tc_sched_clock = tc_sched_clock_read;
tc_delay_timer.read_current_timer = tc_delay_timer_read;
}
/* and away we go! */
ret = clocksource_register_hz(&clksrc, divided_rate);
if (ret)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
ret = setup_clkevents(&tc, best_divisor_idx);
if (ret)
goto err_unregister_clksrc;
sched_clock_register(tc_sched_clock, 32, divided_rate);
tc_delay_timer.freq = divided_rate;
register_current_timer_delay(&tc_delay_timer);
return 0;
err_unregister_clksrc:
clocksource_unregister(&clksrc);
err_disable_t1:
if (bits != 32)
clk_disable_unprepare(tc.clk[1]);
err_disable_t0:
clk_disable_unprepare(t0_clk);
tcaddr = NULL;
return ret;
}
TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
|
linux-master
|
drivers/clocksource/timer-atmel-tcb.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, Linaro Ltd. All rights reserved.
*
* Author: Daniel Lezcano <[email protected]>
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include "timer-of.h"
/**
* timer_of_irq_exit - Release the interrupt
* @of_irq: an of_timer_irq structure pointer
*
* Free the irq resource
*/
static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
struct clock_event_device *clkevt = &to->clkevt;
if (of_irq->percpu)
free_percpu_irq(of_irq->irq, clkevt);
else
free_irq(of_irq->irq, clkevt);
}
/**
* timer_of_irq_init - Request the interrupt
* @np: a device tree node pointer
* @of_irq: an of_timer_irq structure pointer
*
* Get the interrupt number from the device tree definition and request
* it. The interrupt is obtained by falling back in the following order:
*
* - Get interrupt number by name
* - Get interrupt number by index
*
* When the interrupt is per CPU, 'request_percpu_irq()' is called,
* otherwise 'request_irq()' is used.
*
* Returns 0 on success, < 0 otherwise
*/
static __init int timer_of_irq_init(struct device_node *np,
struct of_timer_irq *of_irq)
{
int ret;
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
struct clock_event_device *clkevt = &to->clkevt;
if (of_irq->name) {
of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
if (ret < 0) {
pr_err("Failed to get interrupt %s for %pOF\n",
of_irq->name, np);
return ret;
}
} else {
of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
}
if (!of_irq->irq) {
pr_err("Failed to map interrupt for %pOF\n", np);
return -EINVAL;
}
ret = of_irq->percpu ?
request_percpu_irq(of_irq->irq, of_irq->handler,
np->full_name, clkevt) :
request_irq(of_irq->irq, of_irq->handler,
of_irq->flags ? of_irq->flags : IRQF_TIMER,
np->full_name, clkevt);
if (ret) {
pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
return ret;
}
clkevt->irq = of_irq->irq;
return 0;
}
/**
* timer_of_clk_exit - Release the clock resources
* @of_clk: a of_timer_clk structure pointer
*
* Disables and releases the refcount on the clk
*/
static __init void timer_of_clk_exit(struct of_timer_clk *of_clk)
{
of_clk->rate = 0;
clk_disable_unprepare(of_clk->clk);
clk_put(of_clk->clk);
}
/**
* timer_of_clk_init - Initialize the clock resources
* @np: a device tree node pointer
* @of_clk: a of_timer_clk structure pointer
*
* Get the clock by name or by index, enable it and get the rate
*
* Returns 0 on success, < 0 otherwise
*/
static __init int timer_of_clk_init(struct device_node *np,
struct of_timer_clk *of_clk)
{
int ret;
of_clk->clk = of_clk->name ? of_clk_get_by_name(np, of_clk->name) :
of_clk_get(np, of_clk->index);
if (IS_ERR(of_clk->clk)) {
ret = PTR_ERR(of_clk->clk);
if (ret != -EPROBE_DEFER)
pr_err("Failed to get clock for %pOF\n", np);
goto out;
}
ret = clk_prepare_enable(of_clk->clk);
if (ret) {
pr_err("Failed for enable clock for %pOF\n", np);
goto out_clk_put;
}
of_clk->rate = clk_get_rate(of_clk->clk);
if (!of_clk->rate) {
ret = -EINVAL;
pr_err("Failed to get clock rate for %pOF\n", np);
goto out_clk_disable;
}
of_clk->period = DIV_ROUND_UP(of_clk->rate, HZ);
out:
return ret;
out_clk_disable:
clk_disable_unprepare(of_clk->clk);
out_clk_put:
clk_put(of_clk->clk);
goto out;
}
static __init void timer_of_base_exit(struct of_timer_base *of_base)
{
iounmap(of_base->base);
}
static __init int timer_of_base_init(struct device_node *np,
struct of_timer_base *of_base)
{
of_base->base = of_base->name ?
of_io_request_and_map(np, of_base->index, of_base->name) :
of_iomap(np, of_base->index);
if (IS_ERR_OR_NULL(of_base->base)) {
pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name);
return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
}
return 0;
}
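/**
* timer_of_init - Initialize the timer resources selected by the flags
* @np: a device tree node pointer
* @to: a timer_of structure with TIMER_OF_* flags set in @to->flags
*
* Initialize, in order, the base, the clock and the interrupt resources
* requested by the flags. On failure, the resources acquired so far are
* released again.
*
* Returns 0 on success, < 0 otherwise
*/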
int __init timer_of_init(struct device_node *np, struct timer_of *to)
{
int ret = -EINVAL;
int flags = 0;
if (to->flags & TIMER_OF_BASE) {
ret = timer_of_base_init(np, &to->of_base);
if (ret)
goto out_fail;
flags |= TIMER_OF_BASE;
}
if (to->flags & TIMER_OF_CLOCK) {
ret = timer_of_clk_init(np, &to->of_clk);
if (ret)
goto out_fail;
flags |= TIMER_OF_CLOCK;
}
if (to->flags & TIMER_OF_IRQ) {
ret = timer_of_irq_init(np, &to->of_irq);
if (ret)
goto out_fail;
flags |= TIMER_OF_IRQ;
}
if (!to->clkevt.name)
to->clkevt.name = np->full_name;
to->np = np;
return ret;
out_fail:
if (flags & TIMER_OF_IRQ)
timer_of_irq_exit(&to->of_irq);
if (flags & TIMER_OF_CLOCK)
timer_of_clk_exit(&to->of_clk);
if (flags & TIMER_OF_BASE)
timer_of_base_exit(&to->of_base);
return ret;
}
/**
* timer_of_cleanup - release timer_of resources
* @to: timer_of structure
*
* Release the resources that have been used in timer_of_init().
* This function should be called in init error cases.
*/
void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
timer_of_irq_exit(&to->of_irq);
if (to->flags & TIMER_OF_CLOCK)
timer_of_clk_exit(&to->of_clk);
if (to->flags & TIMER_OF_BASE)
timer_of_base_exit(&to->of_base);
}
|
linux-master
|
drivers/clocksource/timer-of.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
#define pr_fmt(fmt) "mips-gic-timer: " fmt
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <asm/mips-cps.h>
static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
static int gic_timer_irq;
static unsigned int gic_frequency;
static bool __read_mostly gic_clock_unstable;
static void gic_clocksource_unstable(char *reason);
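/*
* When the CM does not provide 64-bit counter access, read the 64-bit
* GIC counter as two 32-bit halves: sample high, then low, then high
* again, and retry if the high word changed (a low-word rollover
* happened between the reads).
*/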
static u64 notrace gic_read_count_2x32(void)
{
unsigned int hi, hi2, lo;
do {
hi = read_gic_counter_32h();
lo = read_gic_counter_32l();
hi2 = read_gic_counter_32h();
} while (hi2 != hi);
return (((u64) hi) << 32) + lo;
}
static u64 notrace gic_read_count_64(void)
{
return read_gic_counter();
}
static u64 notrace gic_read_count(void)
{
if (mips_cm_is64)
return gic_read_count_64();
return gic_read_count_2x32();
}
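/*
* Program the per-VP compare register with "now + delta". The local CPU
* writes its own compare register directly; for a remote CPU the "other"
* register block is redirected to that VP first. If the counter has
* already passed the compare value, return -ETIME so the core retries
* with a larger delta.
*/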
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
int cpu = cpumask_first(evt->cpumask);
u64 cnt;
int res;
cnt = gic_read_count();
cnt += (u64)delta;
if (cpu == raw_smp_processor_id()) {
write_gic_vl_compare(cnt);
} else {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_compare(cnt);
}
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
write_gic_vl_compare(read_gic_vl_compare());
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct irqaction gic_compare_irqaction = {
.handler = gic_compare_interrupt,
.percpu_dev_id = &gic_clockevent_device,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "timer",
};
static void gic_clockevent_cpu_init(unsigned int cpu,
struct clock_event_device *cd)
{
cd->name = "MIPS GIC";
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP;
cd->rating = 350;
cd->irq = gic_timer_irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = gic_next_event;
clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff);
enable_percpu_irq(gic_timer_irq, IRQ_TYPE_NONE);
}
static void gic_clockevent_cpu_exit(struct clock_event_device *cd)
{
disable_percpu_irq(gic_timer_irq);
}
static void gic_update_frequency(void *data)
{
unsigned long rate = (unsigned long)data;
clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
}
static int gic_starting_cpu(unsigned int cpu)
{
gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
return 0;
}
static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct clk_notifier_data *cnd = data;
if (action == POST_RATE_CHANGE) {
gic_clocksource_unstable("ref clock rate change");
on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
}
return NOTIFY_OK;
}
static int gic_dying_cpu(unsigned int cpu)
{
gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
return 0;
}
static struct notifier_block gic_clk_nb = {
.notifier_call = gic_clk_notifier,
};
static int gic_clockevent_init(void)
{
int ret;
if (!gic_frequency)
return -ENXIO;
ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
if (ret < 0) {
pr_err("IRQ %d setup failed (%d)\n", gic_timer_irq, ret);
return ret;
}
cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
"clockevents/mips/gic/timer:starting",
gic_starting_cpu, gic_dying_cpu);
return 0;
}
static u64 gic_hpt_read(struct clocksource *cs)
{
return gic_read_count();
}
static struct clocksource gic_clocksource = {
.name = "GIC",
.read = gic_hpt_read,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.vdso_clock_mode = VDSO_CLOCKMODE_GIC,
};
static void gic_clocksource_unstable(char *reason)
{
if (gic_clock_unstable)
return;
gic_clock_unstable = true;
pr_info("GIC timer is unstable due to %s\n", reason);
clocksource_mark_unstable(&gic_clocksource);
}
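/*
* The COUNTBITS field of GIC_CONFIG encodes the counter width in units
* of four bits above a 32-bit baseline: width = 32 + 4 * COUNTBITS.
*/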
static int __init __gic_clocksource_init(void)
{
unsigned int count_width;
int ret;
/* Set clocksource mask. */
count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
count_width *= 4;
count_width += 32;
gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
/* Calculate a somewhat reasonable rating value. */
gic_clocksource.rating = 200 + gic_frequency / 10000000;
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
pr_warn("Unable to register clocksource\n");
return ret;
}
static int __init gic_clocksource_of_init(struct device_node *node)
{
struct clk *clk;
int ret;
if (!mips_gic_present() || !node->parent ||
!of_device_is_compatible(node->parent, "mti,gic")) {
pr_warn("No DT definition\n");
return -ENXIO;
}
clk = of_clk_get(node, 0);
if (!IS_ERR(clk)) {
ret = clk_prepare_enable(clk);
if (ret < 0) {
pr_err("Failed to enable clock\n");
clk_put(clk);
return ret;
}
gic_frequency = clk_get_rate(clk);
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("Frequency not specified\n");
return -EINVAL;
}
gic_timer_irq = irq_of_parse_and_map(node, 0);
if (!gic_timer_irq) {
pr_err("IRQ not specified\n");
return -EINVAL;
}
ret = __gic_clocksource_init();
if (ret)
return ret;
ret = gic_clockevent_init();
if (!ret && !IS_ERR(clk)) {
if (clk_notifier_register(clk, &gic_clk_nb) < 0)
pr_warn("Unable to register clock notifier\n");
}
/* And finally start the counter */
clear_gic_config(GIC_CONFIG_COUNTSTOP);
/*
* It's safe to use the MIPS GIC timer as a sched clock source only if
* its ticks are stable, which is true on either the platforms with
* stable CPU frequency or on the platforms with CM3 and CPU frequency
* change performed by the CPC core clocks divider.
*/
if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) {
sched_clock_register(mips_cm_is64 ?
gic_read_count_64 : gic_read_count_2x32,
64, gic_frequency);
}
return 0;
}
TIMER_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
gic_clocksource_of_init);
|
linux-master
|
drivers/clocksource/mips-gic-timer.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014-2018 Nuvoton Technologies [email protected]
* All rights reserved.
*
* Copyright 2017 Google, Inc.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include "timer-of.h"
/* Timers registers */
#define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */
#define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */
#define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */
#define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */
#define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */
#define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */
/* Timers control */
#define NPCM7XX_Tx_RESETINT 0x1f
#define NPCM7XX_Tx_PERIOD BIT(27)
#define NPCM7XX_Tx_INTEN BIT(29)
#define NPCM7XX_Tx_COUNTEN BIT(30)
#define NPCM7XX_Tx_ONESHOT 0x0
#define NPCM7XX_Tx_OPER GENMASK(28, 27)
#define NPCM7XX_Tx_MIN_PRESCALE 0x1
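/* The timers are 24-bit down-counters, hence the 0xFFFFFF maximum count */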
#define NPCM7XX_Tx_TDR_MASK_BITS 24
#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF
#define NPCM7XX_T0_CLR_INT 0x1
#define NPCM7XX_Tx_CLR_CSR 0x0
/* Timers operating mode */
#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \
NPCM7XX_Tx_INTEN | \
NPCM7XX_Tx_MIN_PRESCALE)
#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \
NPCM7XX_Tx_INTEN | \
NPCM7XX_Tx_MIN_PRESCALE)
#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \
NPCM7XX_Tx_MIN_PRESCALE)
#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE)
static int npcm7xx_timer_resume(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val |= NPCM7XX_Tx_COUNTEN;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_timer_shutdown(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_COUNTEN;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_timer_oneshot(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_OPER;
val |= NPCM7XX_START_ONESHOT_Tx;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_timer_periodic(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_OPER;
val |= NPCM7XX_START_PERIODIC_Tx;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_clockevent_set_next_event(unsigned long evt,
struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
u32 val;
writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0);
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val |= NPCM7XX_START_Tx;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
struct timer_of *to = to_timer_of(evt);
writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct timer_of npcm7xx_to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "npcm7xx-timer0",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = npcm7xx_clockevent_set_next_event,
.set_state_shutdown = npcm7xx_timer_shutdown,
.set_state_periodic = npcm7xx_timer_periodic,
.set_state_oneshot = npcm7xx_timer_oneshot,
.tick_resume = npcm7xx_timer_resume,
.rating = 300,
},
.of_irq = {
.handler = npcm7xx_timer0_interrupt,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
};
static void __init npcm7xx_clockevents_init(void)
{
writel(NPCM7XX_DEFAULT_CSR,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0);
writel(NPCM7XX_Tx_RESETINT,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR);
npcm7xx_to.clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&npcm7xx_to.clkevt,
timer_of_rate(&npcm7xx_to),
0x1, NPCM7XX_Tx_MAX_CNT);
}
static void __init npcm7xx_clocksource_init(void)
{
u32 val;
writel(NPCM7XX_DEFAULT_CSR,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
writel(NPCM7XX_Tx_MAX_CNT,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1);
val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
val |= NPCM7XX_START_Tx;
writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
clocksource_mmio_init(timer_of_base(&npcm7xx_to) +
NPCM7XX_REG_TDR1,
"npcm7xx-timer1", timer_of_rate(&npcm7xx_to),
200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS,
clocksource_mmio_readl_down);
}
static int __init npcm7xx_timer_init(struct device_node *np)
{
struct clk *clk;
int ret;
ret = timer_of_init(np, &npcm7xx_to);
if (ret)
return ret;
/* Clock input is divided by PRESCALE + 1 before it is fed */
/* to the counter */
npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate /
(NPCM7XX_Tx_MIN_PRESCALE + 1);
/* Enable the clock for timer1, if it exists */
clk = of_clk_get(np, 1);
if (clk) {
if (!IS_ERR(clk))
clk_prepare_enable(clk);
else
pr_warn("%pOF: Failed to get clock for timer1: %pe", np, clk);
}
npcm7xx_clocksource_init();
npcm7xx_clockevents_init();
pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ",
timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to));
return 0;
}
TIMER_OF_DECLARE(wpcm450, "nuvoton,wpcm450-timer", npcm7xx_timer_init);
TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
|
linux-master
|
drivers/clocksource/timer-npcm7xx.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* IXP4 timer driver
* Copyright (C) 2019 Linus Walleij <[email protected]>
*
* Based on arch/arm/mach-ixp4xx/common.c
* Copyright 2002 (C) Intel Corporation
* Copyright 2003-2004 (C) MontaVista, Software, Inc.
* Copyright (C) Deepak Saxena <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
/*
* Constants to make it easy to access Timer Control/Status registers
*/
#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous Timestamp */
#define IXP4XX_OST1_OFFSET 0x04 /* Timer 1 Timestamp */
#define IXP4XX_OSRT1_OFFSET 0x08 /* Timer 1 Reload */
#define IXP4XX_OST2_OFFSET 0x0C /* Timer 2 Timestamp */
#define IXP4XX_OSRT2_OFFSET 0x10 /* Timer 2 Reload */
#define IXP4XX_OSST_OFFSET 0x20 /* Timer Status */
/*
* Timer register values and bit definitions
*/
#define IXP4XX_OST_ENABLE 0x00000001
#define IXP4XX_OST_ONE_SHOT 0x00000002
/* Low order bits of reload value ignored */
#define IXP4XX_OST_RELOAD_MASK 0x00000003
#define IXP4XX_OST_DISABLED 0x00000000
#define IXP4XX_OSST_TIMER_1_PEND 0x00000001
#define IXP4XX_OSST_TIMER_2_PEND 0x00000002
#define IXP4XX_OSST_TIMER_TS_PEND 0x00000004
/* Remaining registers are for the watchdog and defined in the watchdog driver */
struct ixp4xx_timer {
void __iomem *base;
u32 latch;
struct clock_event_device clkevt;
#ifdef CONFIG_ARM
struct delay_timer delay_timer;
#endif
};
/*
* A local singleton used by sched_clock and delay timer reads, which are
* fast and stateless
*/
static struct ixp4xx_timer *local_ixp4xx_timer;
static inline struct ixp4xx_timer *
to_ixp4xx_timer(struct clock_event_device *evt)
{
return container_of(evt, struct ixp4xx_timer, clkevt);
}
static unsigned long ixp4xx_read_timer(void)
{
return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
}
static u64 notrace ixp4xx_read_sched_clock(void)
{
return ixp4xx_read_timer();
}
static u64 ixp4xx_clocksource_read(struct clocksource *c)
{
return ixp4xx_read_timer();
}
static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
{
struct ixp4xx_timer *tmr = dev_id;
struct clock_event_device *evt = &tmr->clkevt;
/* Clear Pending Interrupt */
__raw_writel(IXP4XX_OSST_TIMER_1_PEND,
tmr->base + IXP4XX_OSST_OFFSET);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int ixp4xx_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
u32 val;
val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
/* Keep enable/oneshot bits */
val &= IXP4XX_OST_RELOAD_MASK;
__raw_writel((cycles & ~IXP4XX_OST_RELOAD_MASK) | val,
tmr->base + IXP4XX_OSRT1_OFFSET);
return 0;
}
static int ixp4xx_shutdown(struct clock_event_device *evt)
{
struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
u32 val;
val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
val &= ~IXP4XX_OST_ENABLE;
__raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
return 0;
}
static int ixp4xx_set_oneshot(struct clock_event_device *evt)
{
struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
__raw_writel(IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT,
tmr->base + IXP4XX_OSRT1_OFFSET);
return 0;
}
static int ixp4xx_set_periodic(struct clock_event_device *evt)
{
struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
u32 val;
val = tmr->latch & ~IXP4XX_OST_RELOAD_MASK;
val |= IXP4XX_OST_ENABLE;
__raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
return 0;
}
static int ixp4xx_resume(struct clock_event_device *evt)
{
struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
u32 val;
val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
val |= IXP4XX_OST_ENABLE;
__raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
return 0;
}
/*
* IXP4xx timer tick
* We use OS timer1 on the CPU for the timer tick and the timestamp
* counter as a source of real clock ticks to account for missed jiffies.
*/
static __init int ixp4xx_timer_register(void __iomem *base,
int timer_irq,
unsigned int timer_freq)
{
struct ixp4xx_timer *tmr;
int ret;
tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
if (!tmr)
return -ENOMEM;
tmr->base = base;
/*
* The timer register doesn't allow specifying the two least
* significant bits of the timeout value and assumes they are zero.
* So make sure the latch is the best value with the two least
* significant bits unset.
*/
tmr->latch = DIV_ROUND_CLOSEST(timer_freq,
(IXP4XX_OST_RELOAD_MASK + 1) * HZ)
* (IXP4XX_OST_RELOAD_MASK + 1);
local_ixp4xx_timer = tmr;
/* Reset/disable counter */
__raw_writel(0, tmr->base + IXP4XX_OSRT1_OFFSET);
/* Clear any pending interrupt on timer 1 */
__raw_writel(IXP4XX_OSST_TIMER_1_PEND,
tmr->base + IXP4XX_OSST_OFFSET);
/* Reset time-stamp counter */
__raw_writel(0, tmr->base + IXP4XX_OSTS_OFFSET);
clocksource_mmio_init(NULL, "OSTS", timer_freq, 200, 32,
ixp4xx_clocksource_read);
tmr->clkevt.name = "ixp4xx timer1";
tmr->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
tmr->clkevt.rating = 200;
tmr->clkevt.set_state_shutdown = ixp4xx_shutdown;
tmr->clkevt.set_state_periodic = ixp4xx_set_periodic;
tmr->clkevt.set_state_oneshot = ixp4xx_set_oneshot;
tmr->clkevt.tick_resume = ixp4xx_resume;
tmr->clkevt.set_next_event = ixp4xx_set_next_event;
tmr->clkevt.cpumask = cpumask_of(0);
tmr->clkevt.irq = timer_irq;
ret = request_irq(timer_irq, ixp4xx_timer_interrupt,
IRQF_TIMER, "IXP4XX-TIMER1", tmr);
if (ret) {
pr_crit("no timer IRQ\n");
return -ENODEV;
}
clockevents_config_and_register(&tmr->clkevt, timer_freq,
0xf, 0xfffffffe);
sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
#ifdef CONFIG_ARM
/* Also use this timer for delays */
tmr->delay_timer.read_current_timer = ixp4xx_read_timer;
tmr->delay_timer.freq = timer_freq;
register_current_timer_delay(&tmr->delay_timer);
#endif
return 0;
}
static struct platform_device ixp4xx_watchdog_device = {
.name = "ixp4xx-watchdog",
.id = -1,
};
/*
* This probe gets called after the timer is already up and running. The main
* function on this platform is to spawn the watchdog device as a child.
*/
static int ixp4xx_timer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
/* Pass the base address as platform data and nothing else */
ixp4xx_watchdog_device.dev.platform_data = local_ixp4xx_timer->base;
ixp4xx_watchdog_device.dev.parent = dev;
return platform_device_register(&ixp4xx_watchdog_device);
}
static const struct of_device_id ixp4xx_timer_dt_id[] = {
{ .compatible = "intel,ixp4xx-timer", },
{ /* sentinel */ },
};
static struct platform_driver ixp4xx_timer_driver = {
.probe = ixp4xx_timer_probe,
.driver = {
.name = "ixp4xx-timer",
.of_match_table = ixp4xx_timer_dt_id,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(ixp4xx_timer_driver);
static __init int ixp4xx_of_timer_init(struct device_node *np)
{
void __iomem *base;
int irq;
int ret;
base = of_iomap(np, 0);
if (!base) {
pr_crit("IXP4xx: can't remap timer\n");
return -ENODEV;
}
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_err("Can't parse IRQ\n");
ret = -EINVAL;
goto out_unmap;
}
/* TODO: get some fixed clocks into the device tree */
ret = ixp4xx_timer_register(base, irq, 66666000);
if (ret)
goto out_unmap;
return 0;
out_unmap:
iounmap(base);
return ret;
}
TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
|
linux-master
|
drivers/clocksource/timer-ixp4xx.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Socionext Inc.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include "timer-of.h"
#define MLB_TMR_TMCSR_OFS 0x0
#define MLB_TMR_TMR_OFS 0x4
#define MLB_TMR_TMRLR1_OFS 0x8
#define MLB_TMR_TMRLR2_OFS 0xc
#define MLB_TMR_REGSZPCH 0x10
#define MLB_TMR_TMCSR_OUTL BIT(5)
#define MLB_TMR_TMCSR_RELD BIT(4)
#define MLB_TMR_TMCSR_INTE BIT(3)
#define MLB_TMR_TMCSR_UF BIT(2)
#define MLB_TMR_TMCSR_CNTE BIT(1)
#define MLB_TMR_TMCSR_TRG BIT(0)
#define MLB_TMR_TMCSR_CSL_DIV2 0
#define MLB_TMR_DIV_CNT 2
#define MLB_TMR_SRC_CH 1
#define MLB_TMR_EVT_CH 0
#define MLB_TMR_SRC_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH)
#define MLB_TMR_EVT_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH)
#define MLB_TMR_SRC_TMCSR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMCSR_OFS)
#define MLB_TMR_SRC_TMR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMR_OFS)
#define MLB_TMR_SRC_TMRLR1_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR1_OFS)
#define MLB_TMR_SRC_TMRLR2_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR2_OFS)
#define MLB_TMR_EVT_TMCSR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMCSR_OFS)
#define MLB_TMR_EVT_TMR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMR_OFS)
#define MLB_TMR_EVT_TMRLR1_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR1_OFS)
#define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS)
#define MLB_TIMER_RATING 500
#define MLB_TIMER_ONESHOT 0
#define MLB_TIMER_PERIODIC 1
static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *clk = dev_id;
struct timer_of *to = to_timer_of(clk);
u32 val;
val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
val &= ~MLB_TMR_TMCSR_UF;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
clk->event_handler(clk);
return IRQ_HANDLED;
}
static void mlb_evt_timer_start(struct timer_of *to, bool periodic)
{
u32 val = MLB_TMR_TMCSR_CSL_DIV2;
val |= MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
if (periodic)
val |= MLB_TMR_TMCSR_RELD;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
}
static void mlb_evt_timer_stop(struct timer_of *to)
{
u32 val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
val &= ~MLB_TMR_TMCSR_CNTE;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
}
static void mlb_evt_timer_register_count(struct timer_of *to, unsigned long cnt)
{
writel_relaxed(cnt, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
}
static int mlb_set_state_periodic(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
mlb_evt_timer_stop(to);
mlb_evt_timer_register_count(to, to->of_clk.period);
mlb_evt_timer_start(to, MLB_TIMER_PERIODIC);
return 0;
}
static int mlb_set_state_oneshot(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
mlb_evt_timer_stop(to);
mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
return 0;
}
static int mlb_set_state_shutdown(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
mlb_evt_timer_stop(to);
return 0;
}
static int mlb_clkevt_next_event(unsigned long event,
struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
mlb_evt_timer_stop(to);
mlb_evt_timer_register_count(to, event);
mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
return 0;
}
static int mlb_config_clock_source(struct timer_of *to)
{
u32 val = MLB_TMR_TMCSR_CSL_DIV2;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
return 0;
}
static int mlb_config_clock_event(struct timer_of *to)
{
writel_relaxed(0, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
return 0;
}
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "mlb-clkevt",
.rating = MLB_TIMER_RATING,
.cpumask = cpu_possible_mask,
.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT,
.set_state_oneshot = mlb_set_state_oneshot,
.set_state_periodic = mlb_set_state_periodic,
.set_state_shutdown = mlb_set_state_shutdown,
.set_next_event = mlb_clkevt_next_event,
},
.of_irq = {
.flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = mlb_timer_interrupt,
},
};
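/*
* The source channel counts down (it is reloaded with ~0 in
* mlb_config_clock_source()), so invert the value read here so that
* sched_clock sees a monotonically increasing counter, matching the
* clocksource registered with clocksource_mmio_readl_down().
*/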
static u64 notrace mlb_timer_sched_read(void)
{
return ~readl_relaxed(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS);
}
static int __init mlb_timer_init(struct device_node *node)
{
int ret;
unsigned long rate;
ret = timer_of_init(node, &to);
if (ret)
return ret;
rate = timer_of_rate(&to) / MLB_TMR_DIV_CNT;
mlb_config_clock_source(&to);
clocksource_mmio_init(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS,
node->name, rate, MLB_TIMER_RATING, 32,
clocksource_mmio_readl_down);
sched_clock_register(mlb_timer_sched_read, 32, rate);
mlb_config_clock_event(&to);
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 15,
0xffffffff);
return 0;
}
TIMER_OF_DECLARE(mlb_peritimer, "socionext,milbeaut-timer",
mlb_timer_init);
|
linux-master
|
drivers/clocksource/timer-milbeaut.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Oleksij Rempel <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/bitops.h>
#define DRIVER_NAME "asm9260-timer"
/*
* This device provides 4 offsets for each register:
* 0x0 - plain read/write mode
* 0x4 - set mode, OR logic.
* 0x8 - clr mode, XOR logic.
* 0xc - toggle mode.
*/
#define SET_REG 4
#define CLR_REG 8
#define HW_IR 0x0000 /* RW. Interrupt */
#define BM_IR_CR0 BIT(4)
#define BM_IR_MR3 BIT(3)
#define BM_IR_MR2 BIT(2)
#define BM_IR_MR1 BIT(1)
#define BM_IR_MR0 BIT(0)
#define HW_TCR 0x0010 /* RW. Timer controller */
/* BM_C*_RST
* Timer Counter and the Prescale Counter are synchronously reset on the
* next positive edge of PCLK. The counters remain reset until TCR[1] is
* returned to zero. */
#define BM_C3_RST BIT(7)
#define BM_C2_RST BIT(6)
#define BM_C1_RST BIT(5)
#define BM_C0_RST BIT(4)
/* BM_C*_EN
* 1 - Timer Counter and Prescale Counter are enabled for counting
* 0 - counters are disabled */
#define BM_C3_EN BIT(3)
#define BM_C2_EN BIT(2)
#define BM_C1_EN BIT(1)
#define BM_C0_EN BIT(0)
#define HW_DIR 0x0020 /* RW. Direction? */
/* 00 - count up
* 01 - count down
* 10 - ?? 2^n/2 */
#define BM_DIR_COUNT_UP 0
#define BM_DIR_COUNT_DOWN 1
#define BM_DIR0_SHIFT 0
#define BM_DIR1_SHIFT 4
#define BM_DIR2_SHIFT 8
#define BM_DIR3_SHIFT 12
#define BM_DIR_DEFAULT (BM_DIR_COUNT_UP << BM_DIR0_SHIFT | \
BM_DIR_COUNT_UP << BM_DIR1_SHIFT | \
BM_DIR_COUNT_UP << BM_DIR2_SHIFT | \
BM_DIR_COUNT_UP << BM_DIR3_SHIFT)
#define HW_TC0 0x0030 /* RO. Timer counter 0 */
/* HW_TC*. A timer counter overflow (0xffff.ffff to 0x0000.0000) does not
* generate an interrupt. These registers can be used to detect overflow. */
#define HW_TC1 0x0040
#define HW_TC2 0x0050
#define HW_TC3 0x0060
#define HW_PR 0x0070 /* RW. prescaler */
#define BM_PR_DISABLE 0
#define HW_PC 0x0080 /* RO. Prescaler counter */
#define HW_MCR 0x0090 /* RW. Match control */
/* enable interrupt on match */
#define BM_MCR_INT_EN(n) (1 << (n * 3 + 0))
/* enable TC reset on match */
#define BM_MCR_RES_EN(n) (1 << (n * 3 + 1))
/* enable stop TC on match */
#define BM_MCR_STOP_EN(n) (1 << (n * 3 + 2))
#define HW_MR0 0x00a0 /* RW. Match reg */
#define HW_MR1 0x00b0
#define HW_MR2 0x00C0
#define HW_MR3 0x00D0
#define HW_CTCR 0x0180 /* Counter control */
#define BM_CTCR0_SHIFT 0
#define BM_CTCR1_SHIFT 2
#define BM_CTCR2_SHIFT 4
#define BM_CTCR3_SHIFT 6
#define BM_CTCR_TM 0 /* Timer mode. Every rising PCLK edge. */
#define BM_CTCR_DEFAULT (BM_CTCR_TM << BM_CTCR0_SHIFT | \
BM_CTCR_TM << BM_CTCR1_SHIFT | \
BM_CTCR_TM << BM_CTCR2_SHIFT | \
BM_CTCR_TM << BM_CTCR3_SHIFT)
static struct asm9260_timer_priv {
void __iomem *base;
unsigned long ticks_per_jiffy;
} priv;
static int asm9260_timer_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
/* configure match count for TC0 */
writel_relaxed(delta, priv.base + HW_MR0);
/* enable TC0 */
writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
return 0;
}
static inline void __asm9260_timer_shutdown(struct clock_event_device *evt)
{
/* stop timer0 */
writel_relaxed(BM_C0_EN, priv.base + HW_TCR + CLR_REG);
}
static int asm9260_timer_shutdown(struct clock_event_device *evt)
{
__asm9260_timer_shutdown(evt);
return 0;
}
static int asm9260_timer_set_oneshot(struct clock_event_device *evt)
{
__asm9260_timer_shutdown(evt);
/* enable reset and stop on match */
writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
priv.base + HW_MCR + SET_REG);
return 0;
}
static int asm9260_timer_set_periodic(struct clock_event_device *evt)
{
__asm9260_timer_shutdown(evt);
/* disable reset and stop on match */
writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
priv.base + HW_MCR + CLR_REG);
/* configure match count for TC0 */
writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0);
/* enable TC0 */
writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
return 0;
}
static struct clock_event_device event_dev = {
.name = DRIVER_NAME,
.rating = 200,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = asm9260_timer_set_next_event,
.set_state_shutdown = asm9260_timer_shutdown,
.set_state_periodic = asm9260_timer_set_periodic,
.set_state_oneshot = asm9260_timer_set_oneshot,
.tick_resume = asm9260_timer_shutdown,
};
static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
evt->event_handler(evt);
writel_relaxed(BM_IR_MR0, priv.base + HW_IR);
return IRQ_HANDLED;
}
/*
* ---------------------------------------------------------------------------
* Timer initialization
* ---------------------------------------------------------------------------
*/
static int __init asm9260_timer_init(struct device_node *np)
{
int irq;
struct clk *clk;
int ret;
unsigned long rate;
priv.base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(priv.base)) {
pr_err("%pOFn: unable to map resource\n", np);
return PTR_ERR(priv.base);
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clk!\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Failed to enable clk!\n");
return ret;
}
irq = irq_of_parse_and_map(np, 0);
ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
DRIVER_NAME, &event_dev);
if (ret) {
pr_err("Failed to setup irq!\n");
return ret;
}
/* set all timers for count-up */
writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
/* disable divider */
writel_relaxed(BM_PR_DISABLE, priv.base + HW_PR);
/* make sure all timers use every rising PCLK edge. */
writel_relaxed(BM_CTCR_DEFAULT, priv.base + HW_CTCR);
/* enable interrupt for TC0 and clean setting for all other lines */
writel_relaxed(BM_MCR_INT_EN(0), priv.base + HW_MCR);
rate = clk_get_rate(clk);
clocksource_mmio_init(priv.base + HW_TC1, DRIVER_NAME, rate,
200, 32, clocksource_mmio_readl_up);
/* It seems the counter can't be used without a match register, even if
* actions for MR are disabled. So, set MR to the max value. */
writel_relaxed(0xffffffff, priv.base + HW_MR1);
/* enable TC1 */
writel_relaxed(BM_C1_EN, priv.base + HW_TCR + SET_REG);
priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
event_dev.cpumask = cpumask_of(0);
clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
return 0;
}
TIMER_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
asm9260_timer_init);
|
linux-master
|
drivers/clocksource/asm9260_timer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Keystone broadcast clock-event
*
* Copyright 2013 Texas Instruments, Inc.
*
* Author: Ivan Khoronzhuk <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define TIMER_NAME "timer-keystone"
/* Timer register offsets */
#define TIM12 0x10
#define TIM34 0x14
#define PRD12 0x18
#define PRD34 0x1c
#define TCR 0x20
#define TGCR 0x24
#define INTCTLSTAT 0x44
/* Timer register bitfields */
#define TCR_ENAMODE_MASK 0xC0
#define TCR_ENAMODE_ONESHOT_MASK 0x40
#define TCR_ENAMODE_PERIODIC_MASK 0x80
#define TGCR_TIM_UNRESET_MASK 0x03
#define INTCTLSTAT_ENINT_MASK 0x01
/**
* struct keystone_timer: holds timer's data
* @base: timer memory base address
* @hz_period: cycles per HZ period
* @event_dev: event device based on timer
*/
static struct keystone_timer {
void __iomem *base;
unsigned long hz_period;
struct clock_event_device event_dev;
} timer;
static inline u32 keystone_timer_readl(unsigned long rg)
{
return readl_relaxed(timer.base + rg);
}
static inline void keystone_timer_writel(u32 val, unsigned long rg)
{
writel_relaxed(val, timer.base + rg);
}
/**
* keystone_timer_barrier: write memory barrier
* Use an explicit barrier to avoid the non-relaxed readl/writel variants,
* because in our case the non-relaxed variants hide the true places where
* a barrier is needed.
*/
static inline void keystone_timer_barrier(void)
{
__iowmb();
}
/**
* keystone_timer_config: configures timer to work in oneshot/periodic modes.
* @period: number of cycles to configure for
* @mask: mask of the mode to configure
*/
static int keystone_timer_config(u64 period, int mask)
{
u32 tcr;
u32 off;
tcr = keystone_timer_readl(TCR);
off = tcr & ~(TCR_ENAMODE_MASK);
/* set enable mode */
tcr |= mask;
/* disable timer */
keystone_timer_writel(off, TCR);
/* here we have to be sure the timer has been disabled */
keystone_timer_barrier();
/* reset counter to zero, set new period */
keystone_timer_writel(0, TIM12);
keystone_timer_writel(0, TIM34);
keystone_timer_writel(period & 0xffffffff, PRD12);
keystone_timer_writel(period >> 32, PRD34);
/*
* enable timer
* here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers
* have been written.
*/
keystone_timer_barrier();
keystone_timer_writel(tcr, TCR);
return 0;
}
static void keystone_timer_disable(void)
{
u32 tcr;
tcr = keystone_timer_readl(TCR);
/* disable timer */
tcr &= ~(TCR_ENAMODE_MASK);
keystone_timer_writel(tcr, TCR);
}
static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int keystone_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
return keystone_timer_config(cycles, TCR_ENAMODE_ONESHOT_MASK);
}
static int keystone_shutdown(struct clock_event_device *evt)
{
keystone_timer_disable();
return 0;
}
static int keystone_set_periodic(struct clock_event_device *evt)
{
keystone_timer_config(timer.hz_period, TCR_ENAMODE_PERIODIC_MASK);
return 0;
}
static int __init keystone_timer_init(struct device_node *np)
{
struct clock_event_device *event_dev = &timer.event_dev;
unsigned long rate;
struct clk *clk;
int irq, error;
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
return -EINVAL;
}
timer.base = of_iomap(np, 0);
if (!timer.base) {
pr_err("%s: failed to map registers\n", __func__);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: failed to get clock\n", __func__);
iounmap(timer.base);
return PTR_ERR(clk);
}
error = clk_prepare_enable(clk);
if (error) {
pr_err("%s: failed to enable clock\n", __func__);
goto err;
}
rate = clk_get_rate(clk);
/* disable, use internal clock source */
keystone_timer_writel(0, TCR);
/* here we have to be sure the timer has been disabled */
keystone_timer_barrier();
/* reset timer as 64-bit, no pre-scaler, plus features are disabled */
keystone_timer_writel(0, TGCR);
/* unreset timer */
keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR);
/* init counter to zero */
keystone_timer_writel(0, TIM12);
keystone_timer_writel(0, TIM34);
timer.hz_period = DIV_ROUND_UP(rate, HZ);
/* enable timer interrupts */
keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT);
error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER,
TIMER_NAME, event_dev);
if (error) {
pr_err("%s: failed to setup irq\n", __func__);
goto err;
}
/* setup clockevent */
event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
event_dev->set_next_event = keystone_set_next_event;
event_dev->set_state_shutdown = keystone_shutdown;
event_dev->set_state_periodic = keystone_set_periodic;
event_dev->set_state_oneshot = keystone_shutdown;
event_dev->cpumask = cpu_possible_mask;
event_dev->owner = THIS_MODULE;
event_dev->name = TIMER_NAME;
event_dev->irq = irq;
clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
pr_info("keystone timer clock @%lu Hz\n", rate);
return 0;
err:
clk_put(clk);
iounmap(timer.base);
return error;
}
TIMER_OF_DECLARE(keystone_timer, "ti,keystone-timer",
keystone_timer_init);
|
linux-master
|
drivers/clocksource/timer-keystone.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Ingenic XBurst SoCs SYSOST clocks driver
* Copyright (c) 2020 周琰杰 (Zhou Yanjie) <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/clock/ingenic,sysost.h>
/* OST register offsets */
#define OST_REG_OSTCCR 0x00
#define OST_REG_OSTCR 0x08
#define OST_REG_OSTFR 0x0c
#define OST_REG_OSTMR 0x10
#define OST_REG_OST1DFR 0x14
#define OST_REG_OST1CNT 0x18
#define OST_REG_OST2CNTL 0x20
#define OST_REG_OSTCNT2HBUF 0x24
#define OST_REG_OSTESR 0x34
#define OST_REG_OSTECR 0x38
/* bits within the OSTCCR register */
#define OSTCCR_PRESCALE1_MASK 0x3
#define OSTCCR_PRESCALE2_MASK 0xc
/* bits within the OSTCR register */
#define OSTCR_OST1CLR BIT(0)
#define OSTCR_OST2CLR BIT(1)
/* bits within the OSTFR register */
#define OSTFR_FFLAG BIT(0)
/* bits within the OSTMR register */
#define OSTMR_FMASK BIT(0)
/* bits within the OSTESR register */
#define OSTESR_OST1ENS BIT(0)
#define OSTESR_OST2ENS BIT(1)
/* bits within the OSTECR register */
#define OSTECR_OST1ENC BIT(0)
#define OSTECR_OST2ENC BIT(1)
struct ingenic_soc_info {
unsigned int num_channels;
};
struct ingenic_ost_clk_info {
struct clk_init_data init_data;
u8 ostccr_reg;
};
struct ingenic_ost_clk {
struct clk_hw hw;
unsigned int idx;
struct ingenic_ost *ost;
const struct ingenic_ost_clk_info *info;
};
struct ingenic_ost {
void __iomem *base;
const struct ingenic_soc_info *soc_info;
struct clk *clk, *percpu_timer_clk, *global_timer_clk;
struct clock_event_device cevt;
struct clocksource cs;
char name[20];
struct clk_hw_onecell_data *clocks;
};
static struct ingenic_ost *ingenic_ost;
static inline struct ingenic_ost_clk *to_ost_clk(struct clk_hw *hw)
{
return container_of(hw, struct ingenic_ost_clk, hw);
}
static unsigned long ingenic_ost_percpu_timer_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
const struct ingenic_ost_clk_info *info = ost_clk->info;
unsigned int prescale;
prescale = readl(ost_clk->ost->base + info->ostccr_reg);
prescale = FIELD_GET(OSTCCR_PRESCALE1_MASK, prescale);
return parent_rate >> (prescale * 2);
}
static unsigned long ingenic_ost_global_timer_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
const struct ingenic_ost_clk_info *info = ost_clk->info;
unsigned int prescale;
prescale = readl(ost_clk->ost->base + info->ostccr_reg);
prescale = FIELD_GET(OSTCCR_PRESCALE2_MASK, prescale);
return parent_rate >> (prescale * 2);
}
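/*
* The OST prescaler divides the parent clock by 4^n, giving /1, /4 and
* /16. Pick the smallest divider whose output does not exceed the
* requested rate, falling back to /16.
*/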
static u8 ingenic_ost_get_prescale(unsigned long rate, unsigned long req_rate)
{
u8 prescale;
for (prescale = 0; prescale < 2; prescale++)
if ((rate >> (prescale * 2)) <= req_rate)
return prescale;
return 2; /* /16 divider */
}
static long ingenic_ost_round_rate(struct clk_hw *hw, unsigned long req_rate,
unsigned long *parent_rate)
{
unsigned long rate = *parent_rate;
u8 prescale;
if (req_rate > rate)
return rate;
prescale = ingenic_ost_get_prescale(rate, req_rate);
return rate >> (prescale * 2);
}
static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
unsigned long parent_rate)
{
struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
const struct ingenic_ost_clk_info *info = ost_clk->info;
u8 prescale = ingenic_ost_get_prescale(parent_rate, req_rate);
int val;
val = readl(ost_clk->ost->base + info->ostccr_reg);
val &= ~OSTCCR_PRESCALE1_MASK;
val |= FIELD_PREP(OSTCCR_PRESCALE1_MASK, prescale);
writel(val, ost_clk->ost->base + info->ostccr_reg);
return 0;
}
static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
unsigned long parent_rate)
{
struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
const struct ingenic_ost_clk_info *info = ost_clk->info;
u8 prescale = ingenic_ost_get_prescale(parent_rate, req_rate);
int val;
val = readl(ost_clk->ost->base + info->ostccr_reg);
val &= ~OSTCCR_PRESCALE2_MASK;
val |= FIELD_PREP(OSTCCR_PRESCALE2_MASK, prescale);
writel(val, ost_clk->ost->base + info->ostccr_reg);
return 0;
}
static const struct clk_ops ingenic_ost_percpu_timer_ops = {
.recalc_rate = ingenic_ost_percpu_timer_recalc_rate,
.round_rate = ingenic_ost_round_rate,
.set_rate = ingenic_ost_percpu_timer_set_rate,
};
static const struct clk_ops ingenic_ost_global_timer_ops = {
.recalc_rate = ingenic_ost_global_timer_recalc_rate,
.round_rate = ingenic_ost_round_rate,
.set_rate = ingenic_ost_global_timer_set_rate,
};
static const char * const ingenic_ost_clk_parents[] = { "ext" };
static const struct ingenic_ost_clk_info x1000_ost_clk_info[] = {
[OST_CLK_PERCPU_TIMER] = {
.init_data = {
.name = "percpu timer",
.parent_names = ingenic_ost_clk_parents,
.num_parents = ARRAY_SIZE(ingenic_ost_clk_parents),
.ops = &ingenic_ost_percpu_timer_ops,
.flags = CLK_SET_RATE_UNGATE,
},
.ostccr_reg = OST_REG_OSTCCR,
},
[OST_CLK_GLOBAL_TIMER] = {
.init_data = {
.name = "global timer",
.parent_names = ingenic_ost_clk_parents,
.num_parents = ARRAY_SIZE(ingenic_ost_clk_parents),
.ops = &ingenic_ost_global_timer_ops,
.flags = CLK_SET_RATE_UNGATE,
},
.ostccr_reg = OST_REG_OSTCCR,
},
};
static u64 notrace ingenic_ost_global_timer_read_cntl(void)
{
struct ingenic_ost *ost = ingenic_ost;
unsigned int count;
count = readl(ost->base + OST_REG_OST2CNTL);
return count;
}
static u64 notrace ingenic_ost_clocksource_read(struct clocksource *cs)
{
return ingenic_ost_global_timer_read_cntl();
}
static inline struct ingenic_ost *to_ingenic_ost(struct clock_event_device *evt)
{
return container_of(evt, struct ingenic_ost, cevt);
}
static int ingenic_ost_cevt_set_state_shutdown(struct clock_event_device *evt)
{
struct ingenic_ost *ost = to_ingenic_ost(evt);
writel(OSTECR_OST1ENC, ost->base + OST_REG_OSTECR);
return 0;
}
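/*
* Arm a one-shot event: clear the pending flag, load the compare value
* into OST1DFR, reset the counter, enable the channel and unmask its
* interrupt.
*/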
static int ingenic_ost_cevt_set_next(unsigned long next,
struct clock_event_device *evt)
{
struct ingenic_ost *ost = to_ingenic_ost(evt);
writel((u32)~OSTFR_FFLAG, ost->base + OST_REG_OSTFR);
writel(next, ost->base + OST_REG_OST1DFR);
writel(OSTCR_OST1CLR, ost->base + OST_REG_OSTCR);
writel(OSTESR_OST1ENS, ost->base + OST_REG_OSTESR);
writel((u32)~OSTMR_FMASK, ost->base + OST_REG_OSTMR);
return 0;
}
static irqreturn_t ingenic_ost_cevt_cb(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
struct ingenic_ost *ost = to_ingenic_ost(evt);
writel(OSTECR_OST1ENC, ost->base + OST_REG_OSTECR);
if (evt->event_handler)
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init ingenic_ost_register_clock(struct ingenic_ost *ost,
unsigned int idx, const struct ingenic_ost_clk_info *info,
struct clk_hw_onecell_data *clocks)
{
struct ingenic_ost_clk *ost_clk;
int val, err;
ost_clk = kzalloc(sizeof(*ost_clk), GFP_KERNEL);
if (!ost_clk)
return -ENOMEM;
ost_clk->hw.init = &info->init_data;
ost_clk->idx = idx;
ost_clk->info = info;
ost_clk->ost = ost;
/* Reset clock divider */
val = readl(ost->base + info->ostccr_reg);
val &= ~(OSTCCR_PRESCALE1_MASK | OSTCCR_PRESCALE2_MASK);
writel(val, ost->base + info->ostccr_reg);
err = clk_hw_register(NULL, &ost_clk->hw);
if (err) {
kfree(ost_clk);
return err;
}
clocks->hws[idx] = &ost_clk->hw;
return 0;
}
static struct clk * __init ingenic_ost_get_clock(struct device_node *np, int id)
{
struct of_phandle_args args;
args.np = np;
args.args_count = 1;
args.args[0] = id;
return of_clk_get_from_provider(&args);
}
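/*
 * Note: the OST is its own clock provider (registered from
 * ingenic_ost_probe() below), so instead of a consumer "clocks" phandle
 * lookup the driver builds the phandle args by hand, with one cell holding
 * the channel id. For example, ingenic_ost_get_clock(np, OST_CLK_GLOBAL_TIMER)
 * resolves to the "global timer" clk registered for that channel.
 */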
static int __init ingenic_ost_percpu_timer_init(struct device_node *np,
struct ingenic_ost *ost)
{
unsigned int timer_virq, channel = OST_CLK_PERCPU_TIMER;
unsigned long rate;
int err;
ost->percpu_timer_clk = ingenic_ost_get_clock(np, channel);
if (IS_ERR(ost->percpu_timer_clk))
return PTR_ERR(ost->percpu_timer_clk);
err = clk_prepare_enable(ost->percpu_timer_clk);
if (err)
goto err_clk_put;
rate = clk_get_rate(ost->percpu_timer_clk);
if (!rate) {
err = -EINVAL;
goto err_clk_disable;
}
timer_virq = of_irq_get(np, 0);
if (!timer_virq) {
err = -EINVAL;
goto err_clk_disable;
}
snprintf(ost->name, sizeof(ost->name), "OST percpu timer");
err = request_irq(timer_virq, ingenic_ost_cevt_cb, IRQF_TIMER,
ost->name, &ost->cevt);
if (err)
goto err_irq_dispose_mapping;
ost->cevt.cpumask = cpumask_of(smp_processor_id());
ost->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
ost->cevt.name = ost->name;
ost->cevt.rating = 400;
ost->cevt.set_state_shutdown = ingenic_ost_cevt_set_state_shutdown;
ost->cevt.set_next_event = ingenic_ost_cevt_set_next;
clockevents_config_and_register(&ost->cevt, rate, 4, 0xffffffff);
return 0;
err_irq_dispose_mapping:
irq_dispose_mapping(timer_virq);
err_clk_disable:
clk_disable_unprepare(ost->percpu_timer_clk);
err_clk_put:
clk_put(ost->percpu_timer_clk);
return err;
}
static int __init ingenic_ost_global_timer_init(struct device_node *np,
struct ingenic_ost *ost)
{
unsigned int channel = OST_CLK_GLOBAL_TIMER;
struct clocksource *cs = &ost->cs;
unsigned long rate;
int err;
ost->global_timer_clk = ingenic_ost_get_clock(np, channel);
if (IS_ERR(ost->global_timer_clk))
return PTR_ERR(ost->global_timer_clk);
err = clk_prepare_enable(ost->global_timer_clk);
if (err)
goto err_clk_put;
rate = clk_get_rate(ost->global_timer_clk);
if (!rate) {
err = -EINVAL;
goto err_clk_disable;
}
/* Clear counter CNT registers */
writel(OSTCR_OST2CLR, ost->base + OST_REG_OSTCR);
/* Enable OST channel */
writel(OSTESR_OST2ENS, ost->base + OST_REG_OSTESR);
cs->name = "ingenic-ost";
cs->rating = 400;
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
cs->mask = CLOCKSOURCE_MASK(32);
cs->read = ingenic_ost_clocksource_read;
err = clocksource_register_hz(cs, rate);
if (err)
goto err_clk_disable;
return 0;
err_clk_disable:
clk_disable_unprepare(ost->global_timer_clk);
err_clk_put:
clk_put(ost->global_timer_clk);
return err;
}
static const struct ingenic_soc_info x1000_soc_info = {
.num_channels = 2,
};
static const struct of_device_id __maybe_unused ingenic_ost_of_matches[] __initconst = {
{ .compatible = "ingenic,x1000-ost", .data = &x1000_soc_info },
{ /* sentinel */ }
};
static int __init ingenic_ost_probe(struct device_node *np)
{
const struct of_device_id *id = of_match_node(ingenic_ost_of_matches, np);
struct ingenic_ost *ost;
unsigned int i;
int ret;
ost = kzalloc(sizeof(*ost), GFP_KERNEL);
if (!ost)
return -ENOMEM;
ost->base = of_io_request_and_map(np, 0, of_node_full_name(np));
if (IS_ERR(ost->base)) {
pr_err("%s: Failed to map OST registers\n", __func__);
ret = PTR_ERR(ost->base);
goto err_free_ost;
}
ost->clk = of_clk_get_by_name(np, "ost");
if (IS_ERR(ost->clk)) {
ret = PTR_ERR(ost->clk);
pr_crit("%s: Cannot get OST clock\n", __func__);
goto err_free_ost;
}
ret = clk_prepare_enable(ost->clk);
if (ret) {
pr_crit("%s: Unable to enable OST clock\n", __func__);
goto err_put_clk;
}
ost->soc_info = id->data;
ost->clocks = kzalloc(struct_size(ost->clocks, hws, ost->soc_info->num_channels),
GFP_KERNEL);
if (!ost->clocks) {
ret = -ENOMEM;
goto err_clk_disable;
}
ost->clocks->num = ost->soc_info->num_channels;
for (i = 0; i < ost->clocks->num; i++) {
ret = ingenic_ost_register_clock(ost, i, &x1000_ost_clk_info[i], ost->clocks);
if (ret) {
pr_crit("%s: Cannot register clock %d\n", __func__, i);
goto err_unregister_ost_clocks;
}
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, ost->clocks);
if (ret) {
pr_crit("%s: Cannot add OF clock provider\n", __func__);
goto err_unregister_ost_clocks;
}
ingenic_ost = ost;
return 0;
err_unregister_ost_clocks:
for (i = 0; i < ost->clocks->num; i++)
if (ost->clocks->hws[i])
clk_hw_unregister(ost->clocks->hws[i]);
kfree(ost->clocks);
err_clk_disable:
clk_disable_unprepare(ost->clk);
err_put_clk:
clk_put(ost->clk);
err_free_ost:
kfree(ost);
return ret;
}
static int __init ingenic_ost_init(struct device_node *np)
{
struct ingenic_ost *ost;
unsigned long rate;
int ret;
ret = ingenic_ost_probe(np);
if (ret) {
pr_crit("%s: Failed to initialize OST clocks: %d\n", __func__, ret);
return ret;
}
of_node_clear_flag(np, OF_POPULATED);
ost = ingenic_ost;
if (IS_ERR(ost))
return PTR_ERR(ost);
ret = ingenic_ost_global_timer_init(np, ost);
if (ret) {
pr_crit("%s: Unable to init global timer: %x\n", __func__, ret);
goto err_free_ingenic_ost;
}
ret = ingenic_ost_percpu_timer_init(np, ost);
if (ret)
goto err_ost_global_timer_cleanup;
/* Register the sched_clock at the end as there's no way to undo it */
rate = clk_get_rate(ost->global_timer_clk);
sched_clock_register(ingenic_ost_global_timer_read_cntl, 32, rate);
return 0;
err_ost_global_timer_cleanup:
clocksource_unregister(&ost->cs);
clk_disable_unprepare(ost->global_timer_clk);
clk_put(ost->global_timer_clk);
err_free_ingenic_ost:
kfree(ost);
return ret;
}
TIMER_OF_DECLARE(x1000_ost, "ingenic,x1000-ost", ingenic_ost_init);
|
linux-master
|
drivers/clocksource/ingenic-sysost.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/arch/arm/mach-at91/at91rm9200_time.c
*
* Copyright (C) 2003 SAN People
* Copyright (C) 2003 ATMEL
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/export.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/atmel-st.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
static unsigned long last_crtr;
static u32 irqmask;
static struct clock_event_device clkevt;
static struct regmap *regmap_st;
static int timer_latch;
/*
* The ST_CRTR is updated asynchronously to the master clock ... but
* the updates as seen by the CPU don't seem to be strictly monotonic.
* Waiting until we read the same value twice avoids glitching.
*/
static inline unsigned long read_CRTR(void)
{
unsigned int x1, x2;
regmap_read(regmap_st, AT91_ST_CRTR, &x1);
do {
regmap_read(regmap_st, AT91_ST_CRTR, &x2);
if (x1 == x2)
break;
x1 = x2;
} while (1);
return x1;
}
/*
* IRQ handler for the timer.
*/
static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
{
u32 sr;
regmap_read(regmap_st, AT91_ST_SR, &sr);
sr &= irqmask;
/*
* irqs should be disabled here, but as the irq is shared they are only
* guaranteed to be off if the timer irq is registered first.
*/
WARN_ON_ONCE(!irqs_disabled());
/* simulate "oneshot" timer with alarm */
if (sr & AT91_ST_ALMS) {
clkevt.event_handler(&clkevt);
return IRQ_HANDLED;
}
/* periodic mode should handle delayed ticks */
if (sr & AT91_ST_PITS) {
u32 crtr = read_CRTR();
while (((crtr - last_crtr) & AT91_ST_CRTV) >= timer_latch) {
last_crtr += timer_latch;
clkevt.event_handler(&clkevt);
}
return IRQ_HANDLED;
}
/* this irq is shared ... */
return IRQ_NONE;
}
static u64 read_clk32k(struct clocksource *cs)
{
return read_CRTR();
}
static struct clocksource clk32k = {
.name = "32k_counter",
.rating = 150,
.read = read_clk32k,
.mask = CLOCKSOURCE_MASK(20),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void clkdev32k_disable_and_flush_irq(void)
{
unsigned int val;
/* Disable and flush pending timer interrupts */
regmap_write(regmap_st, AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS);
regmap_read(regmap_st, AT91_ST_SR, &val);
last_crtr = read_CRTR();
}
static int clkevt32k_shutdown(struct clock_event_device *evt)
{
clkdev32k_disable_and_flush_irq();
irqmask = 0;
regmap_write(regmap_st, AT91_ST_IER, irqmask);
return 0;
}
static int clkevt32k_set_oneshot(struct clock_event_device *dev)
{
clkdev32k_disable_and_flush_irq();
/*
* ALM for oneshot irqs, set by next_event()
* before 32 seconds have passed.
*/
irqmask = AT91_ST_ALMS;
regmap_write(regmap_st, AT91_ST_RTAR, last_crtr);
regmap_write(regmap_st, AT91_ST_IER, irqmask);
return 0;
}
static int clkevt32k_set_periodic(struct clock_event_device *dev)
{
clkdev32k_disable_and_flush_irq();
/* PIT for periodic irqs; fixed rate of 1/HZ */
irqmask = AT91_ST_PITS;
regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
regmap_write(regmap_st, AT91_ST_IER, irqmask);
return 0;
}
static int
clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
{
u32 alm;
unsigned int val;
BUG_ON(delta < 2);
/* The alarm IRQ uses absolute time (now+delta), not the relative
* time (delta) in our calling convention. Like all clockevents
* using such "match" hardware, we have a race to defend against.
*
* Our defense here is to have set up the clockevent device so the
* delta is at least two. That way we never end up writing RTAR
* with the value then held in CRTR ... which would mean the match
* wouldn't trigger until 32 seconds later, after CRTR wraps.
*/
alm = read_CRTR();
/* Cancel any pending alarm; flush any pending IRQ */
regmap_write(regmap_st, AT91_ST_RTAR, alm);
regmap_read(regmap_st, AT91_ST_SR, &val);
/* Schedule alarm by writing RTAR. */
alm += delta;
regmap_write(regmap_st, AT91_ST_RTAR, alm);
return 0;
}
static struct clock_event_device clkevt = {
.name = "at91_tick",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.rating = 150,
.set_next_event = clkevt32k_next_event,
.set_state_shutdown = clkevt32k_shutdown,
.set_state_periodic = clkevt32k_set_periodic,
.set_state_oneshot = clkevt32k_set_oneshot,
.tick_resume = clkevt32k_shutdown,
};
/*
* ST (system timer) module supports both clockevents and clocksource.
*/
static int __init atmel_st_timer_init(struct device_node *node)
{
struct clk *sclk;
unsigned int sclk_rate, val;
int irq, ret;
regmap_st = syscon_node_to_regmap(node);
if (IS_ERR(regmap_st)) {
pr_err("Unable to get regmap\n");
return PTR_ERR(regmap_st);
}
/* Disable all timer interrupts, and clear any pending ones */
regmap_write(regmap_st, AT91_ST_IDR,
AT91_ST_PITS | AT91_ST_WDOVF | AT91_ST_RTTINC | AT91_ST_ALMS);
regmap_read(regmap_st, AT91_ST_SR, &val);
/* Get the interrupts property */
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("Unable to get IRQ from DT\n");
return -EINVAL;
}
/* Make IRQs happen for the system timer */
ret = request_irq(irq, at91rm9200_timer_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", regmap_st);
if (ret) {
pr_err("Unable to setup IRQ\n");
return ret;
}
sclk = of_clk_get(node, 0);
if (IS_ERR(sclk)) {
pr_err("Unable to get slow clock\n");
return PTR_ERR(sclk);
}
ret = clk_prepare_enable(sclk);
if (ret) {
pr_err("Could not enable slow clock\n");
return ret;
}
sclk_rate = clk_get_rate(sclk);
if (!sclk_rate) {
pr_err("Invalid slow clock rate\n");
return -EINVAL;
}
timer_latch = (sclk_rate + HZ / 2) / HZ;
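/*
 * Worked example (illustrative values): with the 32768 Hz slow clock and a
 * hypothetical HZ = 100, timer_latch = (32768 + 50) / 100 = 328 slow-clock
 * ticks per jiffy.
 */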
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
* directly for the clocksource and all clockevents, after adjusting
* its prescaler from the 1 Hz default.
*/
regmap_write(regmap_st, AT91_ST_RTMR, 1);
/* Setup timer clockevent, with minimum of two ticks (important!!) */
clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&clkevt, sclk_rate,
2, AT91_ST_ALMV);
/* register clocksource */
return clocksource_register_hz(&clk32k, sclk_rate);
}
TIMER_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
atmel_st_timer_init);
|
linux-master
|
drivers/clocksource/timer-atmel-st.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 ARM Limited
*
* Author: Vladimir Murzin <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#define TIMER_CTRL 0x0
#define TIMER_CTRL_ENABLE BIT(0)
#define TIMER_CTRL_IE BIT(3)
#define TIMER_VALUE 0x4
#define TIMER_RELOAD 0x8
#define TIMER_INT 0xc
struct clockevent_mps2 {
void __iomem *reg;
u32 clock_count_per_tick;
struct clock_event_device clkevt;
};
static void __iomem *sched_clock_base;
static u64 notrace mps2_sched_read(void)
{
return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
}
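/*
 * Note: TIMER_VALUE counts down, but sched_clock() must be monotonically
 * increasing, so the raw value is bitwise-inverted here; the clocksource
 * registered below uses clocksource_mmio_readl_down for the same reason.
 */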
static inline struct clockevent_mps2 *to_mps2_clkevt(struct clock_event_device *c)
{
return container_of(c, struct clockevent_mps2, clkevt);
}
static void clockevent_mps2_writel(u32 val, struct clock_event_device *c, u32 offset)
{
writel_relaxed(val, to_mps2_clkevt(c)->reg + offset);
}
static int mps2_timer_shutdown(struct clock_event_device *ce)
{
clockevent_mps2_writel(0, ce, TIMER_RELOAD);
clockevent_mps2_writel(0, ce, TIMER_CTRL);
return 0;
}
static int mps2_timer_set_next_event(unsigned long next, struct clock_event_device *ce)
{
clockevent_mps2_writel(next, ce, TIMER_VALUE);
clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
return 0;
}
static int mps2_timer_set_periodic(struct clock_event_device *ce)
{
u32 clock_count_per_tick = to_mps2_clkevt(ce)->clock_count_per_tick;
clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_RELOAD);
clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_VALUE);
clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
return 0;
}
static irqreturn_t mps2_timer_interrupt(int irq, void *dev_id)
{
struct clockevent_mps2 *ce = dev_id;
u32 status = readl_relaxed(ce->reg + TIMER_INT);
if (!status) {
pr_warn("spurious interrupt\n");
return IRQ_NONE;
}
writel_relaxed(1, ce->reg + TIMER_INT);
ce->clkevt.event_handler(&ce->clkevt);
return IRQ_HANDLED;
}
static int __init mps2_clockevent_init(struct device_node *np)
{
void __iomem *base;
struct clk *clk = NULL;
struct clockevent_mps2 *ce;
u32 rate;
int irq, ret;
const char *name = "mps2-clkevt";
ret = of_property_read_u32(np, "clock-frequency", &rate);
if (ret) {
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
pr_err("failed to get clock for clockevent: %d\n", ret);
goto out;
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("failed to enable clock for clockevent: %d\n", ret);
goto out_clk_put;
}
rate = clk_get_rate(clk);
}
base = of_iomap(np, 0);
if (!base) {
ret = -EADDRNOTAVAIL;
pr_err("failed to map register for clockevent: %d\n", ret);
goto out_clk_disable;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -ENOENT;
pr_err("failed to get irq for clockevent: %d\n", ret);
goto out_iounmap;
}
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce) {
ret = -ENOMEM;
goto out_iounmap;
}
ce->reg = base;
ce->clock_count_per_tick = DIV_ROUND_CLOSEST(rate, HZ);
ce->clkevt.irq = irq;
ce->clkevt.name = name;
ce->clkevt.rating = 200;
ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->clkevt.cpumask = cpu_possible_mask;
ce->clkevt.set_state_shutdown = mps2_timer_shutdown;
ce->clkevt.set_state_periodic = mps2_timer_set_periodic;
ce->clkevt.set_state_oneshot = mps2_timer_shutdown;
ce->clkevt.set_next_event = mps2_timer_set_next_event;
/* Ensure timer is disabled */
writel_relaxed(0, base + TIMER_CTRL);
ret = request_irq(irq, mps2_timer_interrupt, IRQF_TIMER, name, ce);
if (ret) {
pr_err("failed to request irq for clockevent: %d\n", ret);
goto out_kfree;
}
clockevents_config_and_register(&ce->clkevt, rate, 0xf, 0xffffffff);
return 0;
out_kfree:
kfree(ce);
out_iounmap:
iounmap(base);
out_clk_disable:
/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
clk_disable_unprepare(clk);
out_clk_put:
clk_put(clk);
out:
return ret;
}
static int __init mps2_clocksource_init(struct device_node *np)
{
void __iomem *base;
struct clk *clk = NULL;
u32 rate;
int ret;
const char *name = "mps2-clksrc";
ret = of_property_read_u32(np, "clock-frequency", &rate);
if (ret) {
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
pr_err("failed to get clock for clocksource: %d\n", ret);
goto out;
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("failed to enable clock for clocksource: %d\n", ret);
goto out_clk_put;
}
rate = clk_get_rate(clk);
}
base = of_iomap(np, 0);
if (!base) {
ret = -EADDRNOTAVAIL;
pr_err("failed to map register for clocksource: %d\n", ret);
goto out_clk_disable;
}
/* Ensure timer is disabled */
writel_relaxed(0, base + TIMER_CTRL);
/* ... and set it up as free-running clocksource */
writel_relaxed(0xffffffff, base + TIMER_VALUE);
writel_relaxed(0xffffffff, base + TIMER_RELOAD);
writel_relaxed(TIMER_CTRL_ENABLE, base + TIMER_CTRL);
ret = clocksource_mmio_init(base + TIMER_VALUE, name,
rate, 200, 32,
clocksource_mmio_readl_down);
if (ret) {
pr_err("failed to init clocksource: %d\n", ret);
goto out_iounmap;
}
sched_clock_base = base;
sched_clock_register(mps2_sched_read, 32, rate);
return 0;
out_iounmap:
iounmap(base);
out_clk_disable:
/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
clk_disable_unprepare(clk);
out_clk_put:
clk_put(clk);
out:
return ret;
}
static int __init mps2_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
int ret;
if (!has_clocksource) {
ret = mps2_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
return 0;
}
}
if (!has_clockevent) {
ret = mps2_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
return 0;
}
}
return 0;
}
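/*
 * Note on the init flow (illustrative summary): this routine runs once per
 * matching DT node, and the MPS2 typically describes two timer instances.
 * The first node that probes successfully becomes the clocksource, the next
 * becomes the clockevent, and 0 is always returned, presumably so that a
 * failure on one node does not prevent the other timer from being used.
 */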
TIMER_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
|
linux-master
|
drivers/clocksource/mps2-timer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Rockchip timer support
*
* Copyright (C) Daniel Lezcano <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define TIMER_NAME "rk_timer"
#define TIMER_LOAD_COUNT0 0x00
#define TIMER_LOAD_COUNT1 0x04
#define TIMER_CURRENT_VALUE0 0x08
#define TIMER_CURRENT_VALUE1 0x0C
#define TIMER_CONTROL_REG3288 0x10
#define TIMER_CONTROL_REG3399 0x1c
#define TIMER_INT_STATUS 0x18
#define TIMER_DISABLE 0x0
#define TIMER_ENABLE 0x1
#define TIMER_MODE_FREE_RUNNING (0 << 1)
#define TIMER_MODE_USER_DEFINED_COUNT (1 << 1)
#define TIMER_INT_UNMASK (1 << 2)
struct rk_timer {
void __iomem *base;
void __iomem *ctrl;
struct clk *clk;
struct clk *pclk;
u32 freq;
int irq;
};
struct rk_clkevt {
struct clock_event_device ce;
struct rk_timer timer;
};
static struct rk_clkevt *rk_clkevt;
static struct rk_timer *rk_clksrc;
static inline struct rk_timer *rk_timer(struct clock_event_device *ce)
{
return &container_of(ce, struct rk_clkevt, ce)->timer;
}
static inline void rk_timer_disable(struct rk_timer *timer)
{
writel_relaxed(TIMER_DISABLE, timer->ctrl);
}
static inline void rk_timer_enable(struct rk_timer *timer, u32 flags)
{
writel_relaxed(TIMER_ENABLE | flags, timer->ctrl);
}
static void rk_timer_update_counter(unsigned long cycles,
struct rk_timer *timer)
{
writel_relaxed(cycles, timer->base + TIMER_LOAD_COUNT0);
writel_relaxed(0, timer->base + TIMER_LOAD_COUNT1);
}
static void rk_timer_interrupt_clear(struct rk_timer *timer)
{
writel_relaxed(1, timer->base + TIMER_INT_STATUS);
}
static inline int rk_timer_set_next_event(unsigned long cycles,
struct clock_event_device *ce)
{
struct rk_timer *timer = rk_timer(ce);
rk_timer_disable(timer);
rk_timer_update_counter(cycles, timer);
rk_timer_enable(timer, TIMER_MODE_USER_DEFINED_COUNT |
TIMER_INT_UNMASK);
return 0;
}
static int rk_timer_shutdown(struct clock_event_device *ce)
{
struct rk_timer *timer = rk_timer(ce);
rk_timer_disable(timer);
return 0;
}
static int rk_timer_set_periodic(struct clock_event_device *ce)
{
struct rk_timer *timer = rk_timer(ce);
rk_timer_disable(timer);
rk_timer_update_counter(timer->freq / HZ - 1, timer);
rk_timer_enable(timer, TIMER_MODE_FREE_RUNNING | TIMER_INT_UNMASK);
return 0;
}
static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = dev_id;
struct rk_timer *timer = rk_timer(ce);
rk_timer_interrupt_clear(timer);
if (clockevent_state_oneshot(ce))
rk_timer_disable(timer);
ce->event_handler(ce);
return IRQ_HANDLED;
}
static u64 notrace rk_timer_sched_read(void)
{
return ~readl_relaxed(rk_clksrc->base + TIMER_CURRENT_VALUE0);
}
static int __init
rk_timer_probe(struct rk_timer *timer, struct device_node *np)
{
struct clk *timer_clk;
struct clk *pclk;
int ret = -EINVAL, irq;
u32 ctrl_reg = TIMER_CONTROL_REG3288;
timer->base = of_iomap(np, 0);
if (!timer->base) {
pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
return -ENXIO;
}
if (of_device_is_compatible(np, "rockchip,rk3399-timer"))
ctrl_reg = TIMER_CONTROL_REG3399;
timer->ctrl = timer->base + ctrl_reg;
pclk = of_clk_get_by_name(np, "pclk");
if (IS_ERR(pclk)) {
ret = PTR_ERR(pclk);
pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
ret = clk_prepare_enable(pclk);
if (ret) {
pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
timer->pclk = pclk;
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
ret = PTR_ERR(timer_clk);
pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
goto out_timer_clk;
}
ret = clk_prepare_enable(timer_clk);
if (ret) {
pr_err("Failed to enable timer clock\n");
goto out_timer_clk;
}
timer->clk = timer_clk;
timer->freq = clk_get_rate(timer_clk);
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -EINVAL;
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
goto out_irq;
}
timer->irq = irq;
rk_timer_interrupt_clear(timer);
rk_timer_disable(timer);
return 0;
out_irq:
clk_disable_unprepare(timer_clk);
out_timer_clk:
clk_disable_unprepare(pclk);
out_unmap:
iounmap(timer->base);
return ret;
}
static void __init rk_timer_cleanup(struct rk_timer *timer)
{
clk_disable_unprepare(timer->clk);
clk_disable_unprepare(timer->pclk);
iounmap(timer->base);
}
static int __init rk_clkevt_init(struct device_node *np)
{
struct clock_event_device *ce;
int ret = -EINVAL;
rk_clkevt = kzalloc(sizeof(struct rk_clkevt), GFP_KERNEL);
if (!rk_clkevt) {
ret = -ENOMEM;
goto out;
}
ret = rk_timer_probe(&rk_clkevt->timer, np);
if (ret)
goto out_probe;
ce = &rk_clkevt->ce;
ce->name = TIMER_NAME;
ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ;
ce->set_next_event = rk_timer_set_next_event;
ce->set_state_shutdown = rk_timer_shutdown;
ce->set_state_periodic = rk_timer_set_periodic;
ce->irq = rk_clkevt->timer.irq;
ce->cpumask = cpu_possible_mask;
ce->rating = 250;
ret = request_irq(rk_clkevt->timer.irq, rk_timer_interrupt, IRQF_TIMER,
TIMER_NAME, ce);
if (ret) {
pr_err("Failed to initialize '%s': %d\n",
TIMER_NAME, ret);
goto out_irq;
}
clockevents_config_and_register(&rk_clkevt->ce,
rk_clkevt->timer.freq, 1, UINT_MAX);
return 0;
out_irq:
rk_timer_cleanup(&rk_clkevt->timer);
out_probe:
kfree(rk_clkevt);
out:
/* Leave rk_clkevt not NULL to prevent future init */
rk_clkevt = ERR_PTR(ret);
return ret;
}
static int __init rk_clksrc_init(struct device_node *np)
{
int ret = -EINVAL;
rk_clksrc = kzalloc(sizeof(struct rk_timer), GFP_KERNEL);
if (!rk_clksrc) {
ret = -ENOMEM;
goto out;
}
ret = rk_timer_probe(rk_clksrc, np);
if (ret)
goto out_probe;
rk_timer_update_counter(UINT_MAX, rk_clksrc);
rk_timer_enable(rk_clksrc, 0);
ret = clocksource_mmio_init(rk_clksrc->base + TIMER_CURRENT_VALUE0,
TIMER_NAME, rk_clksrc->freq, 250, 32,
clocksource_mmio_readl_down);
if (ret) {
pr_err("Failed to register clocksource\n");
goto out_clocksource;
}
sched_clock_register(rk_timer_sched_read, 32, rk_clksrc->freq);
return 0;
out_clocksource:
rk_timer_cleanup(rk_clksrc);
out_probe:
kfree(rk_clksrc);
out:
/* Leave rk_clksrc not NULL to prevent future init */
rk_clksrc = ERR_PTR(ret);
return ret;
}
static int __init rk_timer_init(struct device_node *np)
{
if (!rk_clkevt)
return rk_clkevt_init(np);
if (!rk_clksrc)
return rk_clksrc_init(np);
pr_err("Too many timer definitions for '%s'\n", TIMER_NAME);
return -EINVAL;
}
TIMER_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", rk_timer_init);
TIMER_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", rk_timer_init);
|
linux-master
|
drivers/clocksource/timer-rockchip.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 370/XP SoC timer handling.
*
* Copyright (C) 2012 Marvell
*
* Lior Amsalem <[email protected]>
* Gregory CLEMENT <[email protected]>
* Thomas Petazzoni <[email protected]>
*
* Timer 0 is used as free-running clocksource, while timer 1 is
* used as clock_event_device.
*
* ---
* Clocksource driver for Armada 370 and Armada XP SoC.
* This driver implements one compatible string for each SoC, given
* each has its own characteristics:
*
* * Armada 370 has no 25 MHz fixed timer.
*
* * Armada XP cannot work properly without such a 25 MHz fixed timer,
* as otherwise the clocksource frequency would vary with cpufreq
* frequency changes.
*
* See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/percpu.h>
#include <linux/syscore_ops.h>
#include <asm/delay.h>
/*
* Timer block registers.
*/
#define TIMER_CTRL_OFF 0x0000
#define TIMER0_EN BIT(0)
#define TIMER0_RELOAD_EN BIT(1)
#define TIMER0_25MHZ BIT(11)
#define TIMER0_DIV(div) ((div) << 19)
#define TIMER1_EN BIT(2)
#define TIMER1_RELOAD_EN BIT(3)
#define TIMER1_25MHZ BIT(12)
#define TIMER1_DIV(div) ((div) << 22)
#define TIMER_EVENTS_STATUS 0x0004
#define TIMER0_CLR_MASK (~0x1)
#define TIMER1_CLR_MASK (~0x100)
#define TIMER0_RELOAD_OFF 0x0010
#define TIMER0_VAL_OFF 0x0014
#define TIMER1_RELOAD_OFF 0x0018
#define TIMER1_VAL_OFF 0x001c
#define LCL_TIMER_EVENTS_STATUS 0x0028
/* Global timers are connected to the coherency fabric clock, and the
below divider reduces their incrementing frequency. */
#define TIMER_DIVIDER_SHIFT 5
#define TIMER_DIVIDER (1 << TIMER_DIVIDER_SHIFT)
/*
* SoC-specific data.
*/
static void __iomem *timer_base, *local_base;
static unsigned int timer_clk;
static bool timer25Mhz = true;
static u32 enable_mask;
/*
* Number of timer ticks per jiffy.
*/
static u32 ticks_per_jiffy;
static struct clock_event_device __percpu *armada_370_xp_evt;
static void local_timer_ctrl_clrset(u32 clr, u32 set)
{
writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
local_base + TIMER_CTRL_OFF);
}
static u64 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
}
/*
* Clockevent handling.
*/
static int
armada_370_xp_clkevt_next_event(unsigned long delta,
struct clock_event_device *dev)
{
/*
* Clear clockevent timer interrupt.
*/
writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
/*
* Setup new clockevent timer value.
*/
writel(delta, local_base + TIMER0_VAL_OFF);
/*
* Enable the timer.
*/
local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);
return 0;
}
static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt)
{
/*
* Disable timer.
*/
local_timer_ctrl_clrset(TIMER0_EN, 0);
/*
* ACK pending timer interrupt.
*/
writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
return 0;
}
static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt)
{
/*
* Setup timer to fire at 1/HZ intervals.
*/
writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
/*
* Enable timer.
*/
local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
return 0;
}
static int armada_370_xp_clkevt_irq;
static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
{
/*
* ACK timer interrupt and call event handler.
*/
struct clock_event_device *evt = dev_id;
writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
evt->event_handler(evt);
return IRQ_HANDLED;
}
/*
* Setup the local clock events for a CPU.
*/
static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
u32 clr = 0, set = 0;
if (timer25Mhz)
set = TIMER0_25MHZ;
else
clr = TIMER0_25MHZ;
local_timer_ctrl_clrset(clr, set);
evt->name = "armada_370_xp_per_cpu_tick";
evt->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC;
evt->shift = 32;
evt->rating = 300;
evt->set_next_event = armada_370_xp_clkevt_next_event;
evt->set_state_shutdown = armada_370_xp_clkevt_shutdown;
evt->set_state_periodic = armada_370_xp_clkevt_set_periodic;
evt->set_state_oneshot = armada_370_xp_clkevt_shutdown;
evt->tick_resume = armada_370_xp_clkevt_shutdown;
evt->irq = armada_370_xp_clkevt_irq;
evt->cpumask = cpumask_of(cpu);
clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
enable_percpu_irq(evt->irq, 0);
return 0;
}
static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
static int armada_370_xp_timer_suspend(void)
{
timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
return 0;
}
static void armada_370_xp_timer_resume(void)
{
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF);
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
static struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
static unsigned long armada_370_delay_timer_read(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
}
static struct delay_timer armada_370_delay_timer = {
.read_current_timer = armada_370_delay_timer_read,
};
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
u32 clr = 0, set = 0;
int res;
timer_base = of_iomap(np, 0);
if (!timer_base) {
pr_err("Failed to iomap\n");
return -ENXIO;
}
local_base = of_iomap(np, 1);
if (!local_base) {
pr_err("Failed to iomap\n");
return -ENXIO;
}
if (timer25Mhz) {
set = TIMER0_25MHZ;
enable_mask = TIMER0_EN;
} else {
clr = TIMER0_25MHZ;
enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
}
atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
local_timer_ctrl_clrset(clr, set);
/*
* We use timer 0 as clocksource, and private(local) timer 0
* for clockevents
*/
armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
/*
* Setup free-running clocksource timer (interrupts
* disabled).
*/
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
atomic_io_modify(timer_base + TIMER_CTRL_OFF,
TIMER0_RELOAD_EN | enable_mask,
TIMER0_RELOAD_EN | enable_mask);
armada_370_delay_timer.freq = timer_clk;
register_current_timer_delay(&armada_370_delay_timer);
/*
* Set scale and timer for sched_clock.
*/
sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
if (res) {
pr_err("Failed to initialize clocksource mmio\n");
return res;
}
armada_370_xp_evt = alloc_percpu(struct clock_event_device);
if (!armada_370_xp_evt)
return -ENOMEM;
/*
* Setup clockevent timer (interrupt-driven).
*/
res = request_percpu_irq(armada_370_xp_clkevt_irq,
armada_370_xp_timer_interrupt,
"armada_370_xp_per_cpu_tick",
armada_370_xp_evt);
/* Immediately configure the timer on the boot CPU */
if (res) {
pr_err("Failed to request percpu irq\n");
return res;
}
res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
"clockevents/armada:starting",
armada_370_xp_timer_starting_cpu,
armada_370_xp_timer_dying_cpu);
if (res) {
pr_err("Failed to setup hotplug state and timer\n");
return res;
}
register_syscore_ops(&armada_370_xp_timer_syscore_ops);
return 0;
}
static int __init armada_xp_timer_init(struct device_node *np)
{
struct clk *clk = of_clk_get_by_name(np, "fixed");
int ret;
if (IS_ERR(clk)) {
pr_err("Failed to get clock\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret)
return ret;
timer_clk = clk_get_rate(clk);
return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
armada_xp_timer_init);
static int __init armada_375_timer_init(struct device_node *np)
{
struct clk *clk;
int ret;
clk = of_clk_get_by_name(np, "fixed");
if (!IS_ERR(clk)) {
ret = clk_prepare_enable(clk);
if (ret)
return ret;
timer_clk = clk_get_rate(clk);
} else {
/*
* This fallback is required in order to retain proper
* devicetree backwards compatibility.
*/
clk = of_clk_get(np, 0);
/* Must have at least a clock */
if (IS_ERR(clk)) {
pr_err("Failed to get clock\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret)
return ret;
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
}
return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_375, "marvell,armada-375-timer",
armada_375_timer_init);
static int __init armada_370_timer_init(struct device_node *np)
{
struct clk *clk;
int ret;
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clock\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret)
return ret;
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
return armada_370_xp_timer_common_init(np);
}
TIMER_OF_DECLARE(armada_370, "marvell,armada-370-timer",
armada_370_timer_init);
|
linux-master
|
drivers/clocksource/timer-armada-370-xp.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2006 Jim Cromie
*
* This is a clocksource driver for the Geode SCx200's 1 or 27 MHz
* high-resolution timer. The Geode SC-1100 (at least) has a buggy
* time stamp counter (TSC), which loses time unless 'idle=poll' is
* given as a boot-arg. In its absence, the Generic Timekeeping code
* will detect and de-rate the bad TSC, allowing this timer to take
* over timekeeping duties.
*
* Based on work by John Stultz, and Ted Phelps (in a 2.6.12-rc6 patch)
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/scx200.h>
#define NAME "scx200_hrt"
static int mhz27;
module_param(mhz27, int, 0); /* load time only */
MODULE_PARM_DESC(mhz27, "count at 27.0 MHz (default is 1.0 MHz)");
static int ppm;
module_param(ppm, int, 0); /* load time only */
MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
/* HiRes Timer configuration register address */
#define SCx200_TMCNFG_OFFSET (SCx200_TIMER_OFFSET + 5)
/* and config settings */
#define HR_TMEN (1 << 0) /* timer interrupt enable */
#define HR_TMCLKSEL (1 << 1) /* 1|0 counts at 27|1 MHz */
#define HR_TM27MPD (1 << 2) /* 1 turns off input clock (power-down) */
/* The base timer frequency, * 27 if selected */
#define HRT_FREQ 1000000
static u64 read_hrt(struct clocksource *cs)
{
/* Read the timer value */
return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
}
static struct clocksource cs_hrt = {
.name = "scx200_hrt",
.rating = 250,
.read = read_hrt,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
/* mult, shift are set based on mhz27 flag */
};
static int __init init_hrt_clocksource(void)
{
u32 freq;
/* Make sure scx200 has initialized the configuration block */
if (!scx200_cb_present())
return -ENODEV;
/* Reserve the timer's ISA io-region for ourselves */
if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,
SCx200_TIMER_SIZE,
"NatSemi SCx200 High-Resolution Timer")) {
pr_warn("unable to lock timer region\n");
return -ENODEV;
}
/* write timer config */
outb(HR_TMEN | (mhz27 ? HR_TMCLKSEL : 0),
scx200_cb_base + SCx200_TMCNFG_OFFSET);
freq = (HRT_FREQ + ppm);
if (mhz27)
freq *= 27;
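/*
 * Worked example (hypothetical module parameters): with ppm = 0 and
 * mhz27 = 1, the registered frequency is (1000000 + 0) * 27 = 27000000 Hz.
 * The ppm correction is applied to the 1 MHz base before the optional
 * *27, so 1 ppm corresponds to exactly 1 Hz at the base rate.
 */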
pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n", mhz27 ? "27":"1", ppm);
return clocksource_register_hz(&cs_hrt, freq);
}
module_init(init_hrt_clocksource);
MODULE_AUTHOR("Jim Cromie <[email protected]>");
MODULE_DESCRIPTION("clocksource on SCx200 HiRes Timer");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/clocksource/scx200_hrt.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#define TIMER0_FREQ 1000000
#define GXP_TIMER_CNT_OFS 0x00
#define GXP_TIMESTAMP_OFS 0x08
#define GXP_TIMER_CTRL_OFS 0x14
/*
 * TCS stands for Timer Control/Status: these are masks to be used in
 * the Timer Count registers.
 */
#define MASK_TCS_ENABLE 0x01
#define MASK_TCS_PERIOD 0x02
#define MASK_TCS_RELOAD 0x04
#define MASK_TCS_TC 0x80
struct gxp_timer {
void __iomem *counter;
void __iomem *control;
struct clock_event_device evt;
};
static struct gxp_timer *gxp_timer;
static void __iomem *system_clock __ro_after_init;
static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
{
return container_of(evt_dev, struct gxp_timer, evt);
}
static u64 notrace gxp_sched_read(void)
{
return readl_relaxed(system_clock);
}
static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
{
struct gxp_timer *timer = to_gxp_timer(evt_dev);
/* Stop counting and disable interrupt before updating */
writeb_relaxed(MASK_TCS_TC, timer->control);
writel_relaxed(event, timer->counter);
writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);
return 0;
}
static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
{
struct gxp_timer *timer = (struct gxp_timer *)dev_id;
if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
return IRQ_NONE;
writeb_relaxed(MASK_TCS_TC, timer->control);
timer->evt.event_handler(&timer->evt);
return IRQ_HANDLED;
}
static int __init gxp_timer_init(struct device_node *node)
{
void __iomem *base;
struct clk *clk;
u32 freq;
int ret, irq;
gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
if (!gxp_timer) {
ret = -ENOMEM;
pr_err("Can't allocate gxp_timer");
return ret;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
ret = (int)PTR_ERR(clk);
pr_err("%pOFn clock not found: %d\n", node, ret);
goto err_free;
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("%pOFn clock enable failed: %d\n", node, ret);
goto err_clk_enable;
}
base = of_iomap(node, 0);
if (!base) {
ret = -ENXIO;
pr_err("Can't map timer base registers");
goto err_iomap;
}
/* Set the offsets to the clock register and timer registers */
gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
system_clock = base + GXP_TIMESTAMP_OFS;
gxp_timer->evt.name = node->name;
gxp_timer->evt.rating = 300;
gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
gxp_timer->evt.set_next_event = gxp_time_set_next_event;
gxp_timer->evt.cpumask = cpumask_of(0);
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
ret = -EINVAL;
pr_err("GXP Timer Can't parse IRQ %d", irq);
goto err_exit;
}
freq = clk_get_rate(clk);
ret = clocksource_mmio_init(system_clock, node->name, freq,
300, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("%pOFn init clocksource failed: %d", node, ret);
goto err_exit;
}
sched_clock_register(gxp_sched_read, 32, freq);
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
ret = -EINVAL;
pr_err("%pOFn Can't parse IRQ %d", node, irq);
goto err_exit;
}
clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
0xf, 0xffffffff);
ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
node->name, gxp_timer);
if (ret) {
pr_err("%pOFn request_irq() failed: %d", node, ret);
goto err_exit;
}
pr_debug("gxp: system timer (irq = %d)\n", irq);
return 0;
err_exit:
iounmap(base);
err_iomap:
clk_disable_unprepare(clk);
err_clk_enable:
clk_put(clk);
err_free:
kfree(gxp_timer);
return ret;
}
/*
* This probe gets called after the timer is already up and running. This will create
* the watchdog device as a child since the registers are shared.
*/
static int gxp_timer_probe(struct platform_device *pdev)
{
struct platform_device *gxp_watchdog_device;
struct device *dev = &pdev->dev;
int ret;
if (!gxp_timer) {
pr_err("Gxp Timer not initialized, cannot create watchdog");
return -ENOMEM;
}
gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
if (!gxp_watchdog_device) {
pr_err("Timer failed to allocate gxp-wdt");
return -ENOMEM;
}
/* Pass the base address (counter) as platform data and nothing else */
gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
gxp_watchdog_device->dev.parent = dev;
ret = platform_device_add(gxp_watchdog_device);
if (ret)
platform_device_put(gxp_watchdog_device);
return ret;
}
static const struct of_device_id gxp_timer_of_match[] = {
{ .compatible = "hpe,gxp-timer", },
{},
};
static struct platform_driver gxp_timer_driver = {
.probe = gxp_timer_probe,
.driver = {
.name = "gxp-timer",
.of_match_table = gxp_timer_of_match,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(gxp_timer_driver);
TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);
|
linux-master
|
drivers/clocksource/timer-gxp.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* All RISC-V systems have a timer attached to every hart. The timer can
* either be read from the "time" and "timeh" CSRs, with the SBI used to
* set up events, or be accessed directly through MMIO registers.
*/
#define pr_fmt(fmt) "riscv-timer: " fmt
#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/timex.h>
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
static bool riscv_timer_cannot_wake_cpu;
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
u64 next_tval = get_cycles64() + delta;
csr_set(CSR_IE, IE_TIE);
if (static_branch_likely(&riscv_sstc_available)) {
#if defined(CONFIG_32BIT)
csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
csr_write(CSR_STIMECMPH, next_tval >> 32);
#else
csr_write(CSR_STIMECMP, next_tval);
#endif
} else
sbi_set_timer(next_tval);
return 0;
}
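/*
 * Note: with the Sstc extension the next event is programmed directly into
 * the stimecmp CSR (split across STIMECMP/STIMECMPH on 32-bit); otherwise
 * the driver falls back to an SBI set_timer call, which traps to the SBI
 * implementation (typically M-mode firmware) to arm the timer.
 */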
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.set_next_event = riscv_clock_next_event,
};
/*
* It is guaranteed that all the timers across all the harts are synchronized
* within one tick of each other, so while this could technically go
* backwards when hopping between CPUs, practically it won't happen.
*/
static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
{
return get_cycles64();
}
static u64 notrace riscv_sched_clock(void)
{
return get_cycles64();
}
static struct clocksource riscv_clocksource = {
.name = "riscv_clocksource",
.rating = 400,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = riscv_clocksource_rdtime,
#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)
.vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
#else
.vdso_clock_mode = VDSO_CLOCKMODE_NONE,
#endif
};
static int riscv_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
ce->features |= CLOCK_EVT_FEAT_C3STOP;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
enable_percpu_irq(riscv_clock_event_irq,
irq_get_trigger_type(riscv_clock_event_irq));
return 0;
}
static int riscv_timer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(riscv_clock_event_irq);
return 0;
}
void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
{
*mult = riscv_clocksource.mult;
*shift = riscv_clocksource.shift;
}
EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);
/* called directly from the low-level interrupt handler */
static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
return IRQ_HANDLED;
}
static int __init riscv_timer_init_common(void)
{
int error;
struct irq_domain *domain;
struct fwnode_handle *intc_fwnode = riscv_get_intc_hwnode();
domain = irq_find_matching_fwnode(intc_fwnode, DOMAIN_BUS_ANY);
if (!domain) {
pr_err("Failed to find irq_domain for INTC node [%pfwP]\n",
intc_fwnode);
return -ENODEV;
}
riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
if (!riscv_clock_event_irq) {
pr_err("Failed to map timer interrupt for node [%pfwP]\n", intc_fwnode);
return -ENODEV;
}
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
if (error) {
pr_err("RISCV timer registration failed [%d]\n", error);
return error;
}
sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
error = request_percpu_irq(riscv_clock_event_irq,
riscv_timer_interrupt,
"riscv-timer", &riscv_clock_event);
if (error) {
pr_err("registering percpu irq failed [%d]\n", error);
return error;
}
if (riscv_isa_extension_available(NULL, SSTC)) {
pr_info("Timer interrupt in S-mode is available via sstc extension\n");
static_branch_enable(&riscv_sstc_available);
}
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
if (error)
pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
error);
return error;
}
static int __init riscv_timer_init_dt(struct device_node *n)
{
int cpuid, error;
unsigned long hartid;
struct device_node *child;
error = riscv_of_processor_hartid(n, &hartid);
if (error < 0) {
pr_warn("Invalid hartid for node [%pOF] error = [%lu]\n",
n, hartid);
return error;
}
cpuid = riscv_hartid_to_cpuid(hartid);
if (cpuid < 0) {
pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
return cpuid;
}
if (cpuid != smp_processor_id())
return 0;
child = of_find_compatible_node(NULL, NULL, "riscv,timer");
if (child) {
riscv_timer_cannot_wake_cpu = of_property_read_bool(child,
"riscv,timer-cannot-wake-cpu");
of_node_put(child);
}
return riscv_timer_init_common();
}
TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);
#ifdef CONFIG_ACPI
static int __init riscv_timer_acpi_init(struct acpi_table_header *table)
{
return riscv_timer_init_common();
}
TIMER_ACPI_DECLARE(aclint_mtimer, ACPI_SIG_RHCT, riscv_timer_acpi_init);
#endif
|
linux-master
|
drivers/clocksource/timer-riscv.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale FlexTimer Module (FTM) timer driver.
*
* Copyright 2014 Freescale Semiconductor, Inc.
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/fsl/ftm.h>
#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
struct ftm_clock_device {
void __iomem *clksrc_base;
void __iomem *clkevt_base;
unsigned long periodic_cyc;
unsigned long ps;
bool big_endian;
};
static struct ftm_clock_device *priv;
static inline u32 ftm_readl(void __iomem *addr)
{
if (priv->big_endian)
return ioread32be(addr);
else
return ioread32(addr);
}
static inline void ftm_writel(u32 val, void __iomem *addr)
{
if (priv->big_endian)
iowrite32be(val, addr);
else
iowrite32(val, addr);
}
static inline void ftm_counter_enable(void __iomem *base)
{
u32 val;
/* select and enable counter clock source */
val = ftm_readl(base + FTM_SC);
val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
val |= priv->ps | FTM_SC_CLK(1);
ftm_writel(val, base + FTM_SC);
}
static inline void ftm_counter_disable(void __iomem *base)
{
u32 val;
/* disable counter clock source */
val = ftm_readl(base + FTM_SC);
val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
ftm_writel(val, base + FTM_SC);
}
static inline void ftm_irq_acknowledge(void __iomem *base)
{
u32 val;
val = ftm_readl(base + FTM_SC);
val &= ~FTM_SC_TOF;
ftm_writel(val, base + FTM_SC);
}
static inline void ftm_irq_enable(void __iomem *base)
{
u32 val;
val = ftm_readl(base + FTM_SC);
val |= FTM_SC_TOIE;
ftm_writel(val, base + FTM_SC);
}
static inline void ftm_irq_disable(void __iomem *base)
{
u32 val;
val = ftm_readl(base + FTM_SC);
val &= ~FTM_SC_TOIE;
ftm_writel(val, base + FTM_SC);
}
static inline void ftm_reset_counter(void __iomem *base)
{
/*
* The CNT register contains the FTM counter value.
* Reset clears the CNT register. Writing any value to CNT
* reloads the counter with its initial value, CNTIN.
*/
ftm_writel(0x00, base + FTM_CNT);
}
static u64 notrace ftm_read_sched_clock(void)
{
return ftm_readl(priv->clksrc_base + FTM_CNT);
}
static int ftm_set_next_event(unsigned long delta,
struct clock_event_device *unused)
{
/*
 * The CNTIN and MOD registers are both double-buffered: writing
 * to the MOD register only latches the value into a write buffer.
 * The MOD register is updated from that buffer when the counter
 * source clock is disabled.
 */
ftm_counter_disable(priv->clkevt_base);
/* Force the value of CNTIN to be loaded into the FTM counter */
ftm_reset_counter(priv->clkevt_base);
/*
 * The counter increments until the value of MOD is reached,
 * at which point the counter is reloaded with the value of CNTIN.
 * The TOF (overflow) flag is set when the FTM counter changes
 * from MOD to CNTIN, so MOD must be programmed with delta - 1.
 */
ftm_writel(delta - 1, priv->clkevt_base + FTM_MOD);
ftm_counter_enable(priv->clkevt_base);
ftm_irq_enable(priv->clkevt_base);
return 0;
}
static int ftm_set_oneshot(struct clock_event_device *evt)
{
ftm_counter_disable(priv->clkevt_base);
return 0;
}
static int ftm_set_periodic(struct clock_event_device *evt)
{
ftm_set_next_event(priv->periodic_cyc, evt);
return 0;
}
static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
ftm_irq_acknowledge(priv->clkevt_base);
if (likely(clockevent_state_oneshot(evt))) {
ftm_irq_disable(priv->clkevt_base);
ftm_counter_disable(priv->clkevt_base);
}
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct clock_event_device ftm_clockevent = {
.name = "Freescale ftm timer",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_periodic = ftm_set_periodic,
.set_state_oneshot = ftm_set_oneshot,
.set_next_event = ftm_set_next_event,
.rating = 300,
};
static int __init ftm_clockevent_init(unsigned long freq, int irq)
{
int err;
ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
ftm_writel(~0u, priv->clkevt_base + FTM_MOD);
ftm_reset_counter(priv->clkevt_base);
err = request_irq(irq, ftm_evt_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"Freescale ftm timer", &ftm_clockevent);
if (err) {
pr_err("ftm: setup irq failed: %d\n", err);
return err;
}
ftm_clockevent.cpumask = cpumask_of(0);
ftm_clockevent.irq = irq;
clockevents_config_and_register(&ftm_clockevent,
freq / (1 << priv->ps),
1, 0xffff);
ftm_counter_enable(priv->clkevt_base);
return 0;
}
static int __init ftm_clocksource_init(unsigned long freq)
{
int err;
ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
ftm_writel(~0u, priv->clksrc_base + FTM_MOD);
ftm_reset_counter(priv->clksrc_base);
sched_clock_register(ftm_read_sched_clock, 16, freq / (1 << priv->ps));
err = clocksource_mmio_init(priv->clksrc_base + FTM_CNT, "fsl-ftm",
freq / (1 << priv->ps), 300, 16,
clocksource_mmio_readl_up);
if (err) {
pr_err("ftm: init clock source mmio failed: %d\n", err);
return err;
}
ftm_counter_enable(priv->clksrc_base);
return 0;
}
static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
char *ftm_name)
{
struct clk *clk;
int err;
clk = of_clk_get_by_name(np, cnt_name);
if (IS_ERR(clk)) {
pr_err("ftm: Cannot get \"%s\": %ld\n", cnt_name, PTR_ERR(clk));
return PTR_ERR(clk);
}
err = clk_prepare_enable(clk);
if (err) {
pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n",
cnt_name, err);
return err;
}
clk = of_clk_get_by_name(np, ftm_name);
if (IS_ERR(clk)) {
pr_err("ftm: Cannot get \"%s\": %ld\n", ftm_name, PTR_ERR(clk));
return PTR_ERR(clk);
}
err = clk_prepare_enable(clk);
if (err)
pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n",
ftm_name, err);
return clk_get_rate(clk);
}
static unsigned long __init ftm_clk_init(struct device_node *np)
{
long freq;
freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
if (freq <= 0)
return 0;
freq = __ftm_clk_init(np, "ftm-src-counter-en", "ftm-src");
if (freq <= 0)
return 0;
return freq;
}
static int __init ftm_calc_closest_round_cyc(unsigned long freq)
{
priv->ps = 0;
/* The counter register only uses the lower 16 bits, and
 * if the 'freq' value is too big here, then the periodic_cyc
 * may exceed 0xFFFF.
 */
do {
priv->periodic_cyc = DIV_ROUND_CLOSEST(freq,
HZ * (1 << priv->ps++));
} while (priv->periodic_cyc > 0xFFFF);
if (priv->ps > FTM_PS_MAX) {
pr_err("ftm: the prescaler is %lu > %d\n",
priv->ps, FTM_PS_MAX);
return -EINVAL;
}
return 0;
}
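/*
 * Worked example (hypothetical 6 MHz FTM clock, HZ = 100): the first pass
 * computes periodic_cyc = DIV_ROUND_CLOSEST(6000000, 100 * 1) = 60000,
 * which already fits in the 16-bit counter, so the loop exits with
 * priv->ps == 1 because of the post-increment inside the loop body.
 */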
static int __init ftm_timer_init(struct device_node *np)
{
unsigned long freq;
int ret, irq;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = -ENXIO;
priv->clkevt_base = of_iomap(np, 0);
if (!priv->clkevt_base) {
pr_err("ftm: unable to map event timer registers\n");
goto err_clkevt;
}
priv->clksrc_base = of_iomap(np, 1);
if (!priv->clksrc_base) {
pr_err("ftm: unable to map source timer registers\n");
goto err_clksrc;
}
ret = -EINVAL;
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
goto err;
}
priv->big_endian = of_property_read_bool(np, "big-endian");
freq = ftm_clk_init(np);
if (!freq)
goto err;
ret = ftm_calc_closest_round_cyc(freq);
if (ret)
goto err;
ret = ftm_clocksource_init(freq);
if (ret)
goto err;
ret = ftm_clockevent_init(freq, irq);
if (ret)
goto err;
return 0;
err:
iounmap(priv->clksrc_base);
err_clksrc:
iounmap(priv->clkevt_base);
err_clkevt:
kfree(priv);
return ret;
}
TIMER_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
|
linux-master
|
drivers/clocksource/timer-fsl-ftm.c
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Simon Arlott
*/
#include <linux/bitops.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched_clock.h>
#include <asm/irq.h>
#define REG_CONTROL 0x00
#define REG_COUNTER_LO 0x04
#define REG_COUNTER_HI 0x08
#define REG_COMPARE(n) (0x0c + (n) * 4)
#define MAX_TIMER 3
#define DEFAULT_TIMER 3
struct bcm2835_timer {
void __iomem *control;
void __iomem *compare;
int match_mask;
struct clock_event_device evt;
};
static void __iomem *system_clock __read_mostly;
static u64 notrace bcm2835_sched_read(void)
{
return readl_relaxed(system_clock);
}
static int bcm2835_time_set_next_event(unsigned long event,
struct clock_event_device *evt_dev)
{
struct bcm2835_timer *timer = container_of(evt_dev,
struct bcm2835_timer, evt);
writel_relaxed(readl_relaxed(system_clock) + event,
timer->compare);
return 0;
}
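/*
 * Note: the BCM2835 system timer is a free-running counter with per-channel
 * compare registers, so an event is scheduled by writing an absolute match
 * value (current count + delta) rather than by (re)loading a down-counter.
 */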
static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
{
struct bcm2835_timer *timer = dev_id;
void (*event_handler)(struct clock_event_device *);
if (readl_relaxed(timer->control) & timer->match_mask) {
writel_relaxed(timer->match_mask, timer->control);
event_handler = READ_ONCE(timer->evt.event_handler);
if (event_handler)
event_handler(&timer->evt);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
}
}
static int __init bcm2835_timer_init(struct device_node *node)
{
void __iomem *base;
u32 freq;
int irq, ret;
struct bcm2835_timer *timer;
base = of_iomap(node, 0);
if (!base) {
pr_err("Can't remap registers\n");
return -ENXIO;
}
ret = of_property_read_u32(node, "clock-frequency", &freq);
if (ret) {
pr_err("Can't read clock-frequency\n");
goto err_iounmap;
}
system_clock = base + REG_COUNTER_LO;
sched_clock_register(bcm2835_sched_read, 32, freq);
clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
freq, 300, 32, clocksource_mmio_readl_up);
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
if (irq <= 0) {
pr_err("Can't parse IRQ\n");
ret = -EINVAL;
goto err_iounmap;
}
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer) {
ret = -ENOMEM;
goto err_iounmap;
}
timer->control = base + REG_CONTROL;
timer->compare = base + REG_COMPARE(DEFAULT_TIMER);
timer->match_mask = BIT(DEFAULT_TIMER);
timer->evt.name = node->name;
timer->evt.rating = 300;
timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
timer->evt.set_next_event = bcm2835_time_set_next_event;
timer->evt.cpumask = cpumask_of(0);
ret = request_irq(irq, bcm2835_time_interrupt, IRQF_TIMER | IRQF_SHARED,
node->name, timer);
if (ret) {
pr_err("Can't set up timer IRQ\n");
goto err_timer_free;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
pr_info("bcm2835: system timer (irq = %d)\n", irq);
return 0;
err_timer_free:
kfree(timer);
err_iounmap:
iounmap(base);
return ret;
}
TIMER_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
|
linux-master
|
drivers/clocksource/bcm2835_timer.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Pistachio clocksource based on general-purpose timers
*
* Copyright (C) 2015 Imagination Technologies
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>
#include <linux/time.h>
/* Top level reg */
#define CR_TIMER_CTRL_CFG 0x00
#define TIMER_ME_GLOBAL BIT(0)
#define CR_TIMER_REV 0x10
/* Timer specific registers */
#define TIMER_CFG 0x20
#define TIMER_ME_LOCAL BIT(0)
#define TIMER_RELOAD_VALUE 0x24
#define TIMER_CURRENT_VALUE 0x28
#define TIMER_CURRENT_OVERFLOW_VALUE 0x2C
#define TIMER_IRQ_STATUS 0x30
#define TIMER_IRQ_CLEAR 0x34
#define TIMER_IRQ_MASK 0x38
#define PERIP_TIMER_CONTROL 0x90
/* Timer specific configuration Values */
#define RELOAD_VALUE 0xffffffff
struct pistachio_clocksource {
void __iomem *base;
raw_spinlock_t lock;
struct clocksource cs;
};
static struct pistachio_clocksource pcs_gpt;
#define to_pistachio_clocksource(cs) \
container_of(cs, struct pistachio_clocksource, cs)
static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id)
{
return readl(base + 0x20 * gpt_id + offset);
}
static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
u32 gpt_id)
{
writel(value, base + 0x20 * gpt_id + offset);
}
static u64 notrace
pistachio_clocksource_read_cycles(struct clocksource *cs)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
__maybe_unused u32 overflow;
u32 counter;
unsigned long flags;
/*
	 * The counter value is only refreshed after the overflow value is read,
	 * and the two must be read in strict order, hence the raw spinlock.
*/
raw_spin_lock_irqsave(&pcs->lock, flags);
overflow = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
raw_spin_unlock_irqrestore(&pcs->lock, flags);
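	/*
	 * The GP timer counts down from RELOAD_VALUE, so invert the value to
	 * present an up-counting clocksource.
	 */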
return (u64)~counter;
}
static u64 notrace pistachio_read_sched_clock(void)
{
return pistachio_clocksource_read_cycles(&pcs_gpt.cs);
}
static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx,
int enable)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
u32 val;
val = gpt_readl(pcs->base, TIMER_CFG, timeridx);
if (enable)
val |= TIMER_ME_LOCAL;
else
val &= ~TIMER_ME_LOCAL;
gpt_writel(pcs->base, val, TIMER_CFG, timeridx);
}
static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
/* Disable GPT local before loading reload value */
pistachio_clksrc_set_mode(cs, timeridx, false);
gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx);
pistachio_clksrc_set_mode(cs, timeridx, true);
}
static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx)
{
/* Disable GPT local */
pistachio_clksrc_set_mode(cs, timeridx, false);
}
static int pistachio_clocksource_enable(struct clocksource *cs)
{
pistachio_clksrc_enable(cs, 0);
return 0;
}
static void pistachio_clocksource_disable(struct clocksource *cs)
{
pistachio_clksrc_disable(cs, 0);
}
/* Desirable clock source for pistachio platform */
static struct pistachio_clocksource pcs_gpt = {
.cs = {
.name = "gptimer",
.rating = 300,
.enable = pistachio_clocksource_enable,
.disable = pistachio_clocksource_disable,
.read = pistachio_clocksource_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_SUSPEND_NONSTOP,
},
};
static int __init pistachio_clksrc_of_init(struct device_node *node)
{
struct clk *sys_clk, *fast_clk;
struct regmap *periph_regs;
unsigned long rate;
int ret;
pcs_gpt.base = of_iomap(node, 0);
if (!pcs_gpt.base) {
pr_err("cannot iomap\n");
return -ENXIO;
}
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
if (IS_ERR(periph_regs)) {
pr_err("cannot get peripheral regmap (%ld)\n",
PTR_ERR(periph_regs));
return PTR_ERR(periph_regs);
}
/* Switch to using the fast counter clock */
ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
0xf, 0x0);
if (ret)
return ret;
sys_clk = of_clk_get_by_name(node, "sys");
if (IS_ERR(sys_clk)) {
pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
return PTR_ERR(sys_clk);
}
fast_clk = of_clk_get_by_name(node, "fast");
if (IS_ERR(fast_clk)) {
pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
return PTR_ERR(fast_clk);
}
ret = clk_prepare_enable(sys_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
return ret;
}
ret = clk_prepare_enable(fast_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
clk_disable_unprepare(sys_clk);
return ret;
}
rate = clk_get_rate(fast_clk);
/* Disable irq's for clocksource usage */
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
/* Enable timer block */
writel(TIMER_ME_GLOBAL, pcs_gpt.base);
raw_spin_lock_init(&pcs_gpt.lock);
sched_clock_register(pistachio_read_sched_clock, 32, rate);
return clocksource_register_hz(&pcs_gpt.cs, rate);
}
TIMER_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
pistachio_clksrc_of_init);
|
linux-master
|
drivers/clocksource/timer-pistachio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
* Copyright (C) 2010 Linus Walleij for ST-Ericsson
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
/*
 * The MTU device hosts four different counters, each with its own set of
 * registers. These are the register names.
*/
#define MTU_IMSC 0x00 /* Interrupt mask set/clear */
#define MTU_RIS 0x04 /* Raw interrupt status */
#define MTU_MIS 0x08 /* Masked interrupt status */
#define MTU_ICR 0x0C /* Interrupt clear register */
/* per-timer registers take 0..3 as argument */
#define MTU_LR(x) (0x10 + 0x10 * (x) + 0x00) /* Load value */
#define MTU_VAL(x) (0x10 + 0x10 * (x) + 0x04) /* Current value */
#define MTU_CR(x) (0x10 + 0x10 * (x) + 0x08) /* Control reg */
#define MTU_BGLR(x) (0x10 + 0x10 * (x) + 0x0c) /* At next overflow */
/* bits for the control register */
#define MTU_CRn_ENA 0x80
#define MTU_CRn_PERIODIC 0x40 /* if 0 = free-running */
#define MTU_CRn_PRESCALE_MASK 0x0c
#define MTU_CRn_PRESCALE_1 0x00
#define MTU_CRn_PRESCALE_16 0x04
#define MTU_CRn_PRESCALE_256 0x08
#define MTU_CRn_32BITS 0x02
#define MTU_CRn_ONESHOT 0x01 /* if 0 = wraps reloading from BGLR*/
/* Other registers are usual amba/primecell registers, currently not used */
#define MTU_ITCR 0xff0
#define MTU_ITOP 0xff4
#define MTU_PERIPH_ID0 0xfe0
#define MTU_PERIPH_ID1 0xfe4
#define MTU_PERIPH_ID2 0xfe8
#define MTU_PERIPH_ID3 0xfeC
#define MTU_PCELL0 0xff0
#define MTU_PCELL1 0xff4
#define MTU_PCELL2 0xff8
#define MTU_PCELL3 0xffC
static void __iomem *mtu_base;
static bool clkevt_periodic;
static u32 clk_prescale;
static u32 nmdk_cycle; /* write-once */
static struct delay_timer mtu_delay_timer;
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel.
*/
static u64 notrace nomadik_read_sched_clock(void)
{
if (unlikely(!mtu_base))
return 0;
return -readl(mtu_base + MTU_VAL(0));
}
static unsigned long nmdk_timer_read_current_timer(void)
{
return ~readl_relaxed(mtu_base + MTU_VAL(0));
}
/* Clockevent device: use one-shot mode */
static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
writel(1 << 1, mtu_base + MTU_IMSC);
writel(evt, mtu_base + MTU_LR(1));
/* Load highest value, enable device, enable interrupts */
writel(MTU_CRn_ONESHOT | clk_prescale |
MTU_CRn_32BITS | MTU_CRn_ENA,
mtu_base + MTU_CR(1));
return 0;
}
static void nmdk_clkevt_reset(void)
{
if (clkevt_periodic) {
/* Timer: configure load and background-load, and fire it up */
writel(nmdk_cycle, mtu_base + MTU_LR(1));
writel(nmdk_cycle, mtu_base + MTU_BGLR(1));
writel(MTU_CRn_PERIODIC | clk_prescale |
MTU_CRn_32BITS | MTU_CRn_ENA,
mtu_base + MTU_CR(1));
writel(1 << 1, mtu_base + MTU_IMSC);
} else {
/* Generate an interrupt to start the clockevent again */
(void) nmdk_clkevt_next(nmdk_cycle, NULL);
}
}
static int nmdk_clkevt_shutdown(struct clock_event_device *evt)
{
writel(0, mtu_base + MTU_IMSC);
/* disable timer */
writel(0, mtu_base + MTU_CR(1));
/* load some high default value */
writel(0xffffffff, mtu_base + MTU_LR(1));
return 0;
}
static int nmdk_clkevt_set_oneshot(struct clock_event_device *evt)
{
clkevt_periodic = false;
return 0;
}
static int nmdk_clkevt_set_periodic(struct clock_event_device *evt)
{
clkevt_periodic = true;
nmdk_clkevt_reset();
return 0;
}
static void nmdk_clksrc_reset(void)
{
/* Disable */
writel(0, mtu_base + MTU_CR(0));
/* ClockSource: configure load and background-load, and fire it up */
writel(nmdk_cycle, mtu_base + MTU_LR(0));
writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
mtu_base + MTU_CR(0));
}
static void nmdk_clkevt_resume(struct clock_event_device *cedev)
{
nmdk_clkevt_reset();
nmdk_clksrc_reset();
}
static struct clock_event_device nmdk_clkevt = {
.name = "mtu_1",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DYNIRQ,
.rating = 200,
.set_state_shutdown = nmdk_clkevt_shutdown,
.set_state_periodic = nmdk_clkevt_set_periodic,
.set_state_oneshot = nmdk_clkevt_set_oneshot,
.set_next_event = nmdk_clkevt_next,
.resume = nmdk_clkevt_resume,
};
/*
* IRQ Handler for timer 1 of the MTU block.
*/
static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = dev_id;
writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
evdev->event_handler(evdev);
return IRQ_HANDLED;
}
static int __init nmdk_timer_init(void __iomem *base, int irq,
struct clk *pclk, struct clk *clk)
{
unsigned long rate;
int ret;
int min_ticks;
mtu_base = base;
BUG_ON(clk_prepare_enable(pclk));
BUG_ON(clk_prepare_enable(clk));
/*
	 * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
	 * for ux500, and in one specific Ux500 case 32768 Hz.
	 *
	 * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
	 * At 32 MHz, the timer (with a 32-bit counter) can be programmed
	 * to wake up at most about 127 s ahead in time. Dividing a 2.4 MHz
	 * timer by 16 gives too low a timer resolution.
*/
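	/*
	 * Worked example with assumed numbers: at 133 MHz the divide-by-16
	 * path yields a rate of about 8.31 MHz, so nmdk_cycle below is about
	 * 83,125 ticks per jiffy at HZ=100, and a 32-bit one-shot can be
	 * programmed roughly 2^32 / 8.31e6 ~= 516 s ahead; at 2.4 MHz
	 * undivided the horizon is roughly 1790 s.
	 */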
rate = clk_get_rate(clk);
if (rate > 32000000) {
rate /= 16;
clk_prescale = MTU_CRn_PRESCALE_16;
} else {
clk_prescale = MTU_CRn_PRESCALE_1;
}
/* Cycles for periodic mode */
nmdk_cycle = DIV_ROUND_CLOSEST(rate, HZ);
/* Timer 0 is the free running clocksource */
nmdk_clksrc_reset();
ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
rate, 200, 32, clocksource_mmio_readl_down);
if (ret) {
pr_err("timer: failed to initialize clock source %s\n", "mtu_0");
return ret;
}
sched_clock_register(nomadik_read_sched_clock, 32, rate);
/* Timer 1 is used for events, register irq and clockevents */
if (request_irq(irq, nmdk_timer_interrupt, IRQF_TIMER,
"Nomadik Timer Tick", &nmdk_clkevt))
pr_err("%s: request_irq() failed\n", "Nomadik Timer Tick");
nmdk_clkevt.cpumask = cpumask_of(0);
nmdk_clkevt.irq = irq;
if (rate < 100000)
min_ticks = 5;
else
min_ticks = 2;
clockevents_config_and_register(&nmdk_clkevt, rate, min_ticks,
0xffffffffU);
mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
mtu_delay_timer.freq = rate;
register_current_timer_delay(&mtu_delay_timer);
return 0;
}
static int __init nmdk_timer_of_init(struct device_node *node)
{
struct clk *pclk;
struct clk *clk;
void __iomem *base;
int irq;
base = of_iomap(node, 0);
if (!base) {
pr_err("Can't remap registers\n");
return -ENXIO;
}
pclk = of_clk_get_by_name(node, "apb_pclk");
if (IS_ERR(pclk)) {
pr_err("could not get apb_pclk\n");
return PTR_ERR(pclk);
}
clk = of_clk_get_by_name(node, "timclk");
if (IS_ERR(clk)) {
pr_err("could not get timclk\n");
return PTR_ERR(clk);
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
pr_err("Can't parse IRQ\n");
return -EINVAL;
}
return nmdk_timer_init(base, irq, pclk, clk);
}
TIMER_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
nmdk_timer_of_init);
|
linux-master
|
drivers/clocksource/nomadik-mtu.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Integrator/AP timer driver
* Copyright (C) 2000-2003 Deep Blue Solutions Ltd
* Copyright (c) 2014, Linaro Limited
*/
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include "timer-sp.h"
static void __iomem * sched_clk_base;
static u64 notrace integrator_read_sched_clock(void)
{
return -readl(sched_clk_base + TIMER_VALUE);
}
static int __init integrator_clocksource_init(unsigned long inrate,
void __iomem *base)
{
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
unsigned long rate = inrate;
int ret;
if (rate >= 1500000) {
rate /= 16;
ctrl |= TIMER_CTRL_DIV16;
}
writel(0xffff, base + TIMER_LOAD);
writel(ctrl, base + TIMER_CTRL);
ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
rate, 200, 16, clocksource_mmio_readl_down);
if (ret)
return ret;
sched_clk_base = base;
sched_clock_register(integrator_read_sched_clock, 16, rate);
return 0;
}
static unsigned long timer_reload;
static void __iomem * clkevt_base;
/*
* IRQ handler for the timer
*/
static irqreturn_t integrator_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
/* clear the interrupt */
writel(1, clkevt_base + TIMER_INTCLR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int clkevt_shutdown(struct clock_event_device *evt)
{
u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
/* Disable timer */
writel(ctrl, clkevt_base + TIMER_CTRL);
return 0;
}
static int clkevt_set_oneshot(struct clock_event_device *evt)
{
u32 ctrl = readl(clkevt_base + TIMER_CTRL) &
~(TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC);
/* Leave the timer disabled, .set_next_event will enable it */
writel(ctrl, clkevt_base + TIMER_CTRL);
return 0;
}
static int clkevt_set_periodic(struct clock_event_device *evt)
{
u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
/* Disable timer */
writel(ctrl, clkevt_base + TIMER_CTRL);
/* Enable the timer and start the periodic tick */
writel(timer_reload, clkevt_base + TIMER_LOAD);
ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
writel(ctrl, clkevt_base + TIMER_CTRL);
return 0;
}
static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
{
unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
writel(ctrl & ~TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
writel(next, clkevt_base + TIMER_LOAD);
writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
return 0;
}
static struct clock_event_device integrator_clockevent = {
.name = "timer1",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = clkevt_shutdown,
.set_state_periodic = clkevt_set_periodic,
.set_state_oneshot = clkevt_set_oneshot,
.tick_resume = clkevt_shutdown,
.set_next_event = clkevt_set_next_event,
.rating = 300,
};
static int integrator_clockevent_init(unsigned long inrate,
void __iomem *base, int irq)
{
unsigned long rate = inrate;
unsigned int ctrl = 0;
int ret;
clkevt_base = base;
/* Calculate and program a divisor */
if (rate > 0x100000 * HZ) {
rate /= 256;
ctrl |= TIMER_CTRL_DIV256;
} else if (rate > 0x10000 * HZ) {
rate /= 16;
ctrl |= TIMER_CTRL_DIV16;
}
timer_reload = rate / HZ;
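	/*
	 * Illustration with an assumed 24 MHz input and HZ=100: 24 MHz is
	 * above 0x10000 * HZ (~6.55 MHz) but below 0x100000 * HZ (~104.9 MHz),
	 * so the rate is divided by 16 to 1.5 MHz and timer_reload becomes
	 * 15000, which fits the 16-bit LOAD register.
	 */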
writel(ctrl, clkevt_base + TIMER_CTRL);
ret = request_irq(irq, integrator_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "timer",
&integrator_clockevent);
if (ret)
return ret;
clockevents_config_and_register(&integrator_clockevent,
rate,
1,
0xffffU);
return 0;
}
static int __init integrator_ap_timer_init_of(struct device_node *node)
{
const char *path;
void __iomem *base;
int err;
int irq;
struct clk *clk;
unsigned long rate;
struct device_node *alias_node;
base = of_io_request_and_map(node, 0, "integrator-timer");
if (IS_ERR(base))
return PTR_ERR(base);
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("No clock for %pOFn\n", node);
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
writel(0, base + TIMER_CTRL);
err = of_property_read_string(of_aliases,
"arm,timer-primary", &path);
if (err) {
pr_warn("Failed to read property\n");
return err;
}
alias_node = of_find_node_by_path(path);
/*
	 * The pointer is used as an identifier, not dereferenced, so we
	 * can drop the refcount on the of_node immediately after
	 * getting it.
*/
of_node_put(alias_node);
if (node == alias_node)
/* The primary timer lacks IRQ, use as clocksource */
return integrator_clocksource_init(rate, base);
err = of_property_read_string(of_aliases,
"arm,timer-secondary", &path);
if (err) {
pr_warn("Failed to read property\n");
return err;
}
alias_node = of_find_node_by_path(path);
of_node_put(alias_node);
if (node == alias_node) {
/* The secondary timer will drive the clock event */
irq = irq_of_parse_and_map(node, 0);
return integrator_clockevent_init(rate, base, irq);
}
pr_info("Timer @%p unused\n", base);
clk_disable_unprepare(clk);
return 0;
}
TIMER_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
integrator_ap_timer_init_of);
|
linux-master
|
drivers/clocksource/timer-integrator-ap.c
|
// SPDX-License-Identifier: GPL-2.0+
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/clk/clk-conf.h>
#include <clocksource/timer-ti-dm.h>
#include <dt-bindings/bus/ti-sysc.h>
/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
#define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \
SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2)
#define DMTIMER_RESET_WAIT 100000
#define DMTIMER_INST_DONT_CARE ~0U
static int counter_32k;
static u32 clocksource;
static u32 clockevent;
/*
* Subset of the timer registers we use. Note that the register offsets
* depend on the timer revision detected.
*/
struct dmtimer_systimer {
void __iomem *base;
u8 sysc;
u8 irq_stat;
u8 irq_ena;
u8 pend;
u8 load;
u8 counter;
u8 ctrl;
u8 wakeup;
u8 ifctrl;
struct clk *fck;
struct clk *ick;
unsigned long rate;
};
struct dmtimer_clockevent {
struct clock_event_device dev;
struct dmtimer_systimer t;
u32 period;
};
struct dmtimer_clocksource {
struct clocksource dev;
struct dmtimer_systimer t;
unsigned int loadval;
};
/* Assumes v1 ip if bits [31:16] are zero */
static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t)
{
u32 tidr = readl_relaxed(t->base);
return !(tidr >> 16);
}
static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
{
u32 val;
if (dmtimer_systimer_revision1(t))
val = DMTIMER_TYPE1_ENABLE;
else
val = DMTIMER_TYPE2_ENABLE;
writel_relaxed(val, t->base + t->sysc);
}
static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
{
if (!dmtimer_systimer_revision1(t))
return;
writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
}
static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t)
{
void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET;
int ret;
u32 l;
dmtimer_systimer_enable(t);
writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl);
ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100,
DMTIMER_RESET_WAIT);
return ret;
}
/* Note we must use io_base instead of func_base for type2 OCP regs */
static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t)
{
void __iomem *sysc = t->base + t->sysc;
u32 l;
dmtimer_systimer_enable(t);
l = readl_relaxed(sysc);
l |= BIT(0);
writel_relaxed(l, sysc);
return readl_poll_timeout_atomic(sysc, l, !(l & BIT(0)), 100,
DMTIMER_RESET_WAIT);
}
static int __init dmtimer_systimer_reset(struct dmtimer_systimer *t)
{
int ret;
if (dmtimer_systimer_revision1(t))
ret = dmtimer_systimer_type1_reset(t);
else
ret = dmtimer_systimer_type2_reset(t);
if (ret < 0) {
pr_err("%s failed with %i\n", __func__, ret);
return ret;
}
return 0;
}
static const struct of_device_id counter_match_table[] = {
{ .compatible = "ti,omap-counter32k" },
{ /* Sentinel */ },
};
/*
 * Check if the SoC also has a usable working 32 KiHz counter. The 32 KiHz
* counter is handled by timer-ti-32k, but we need to detect it as it
* affects the preferred dmtimer system timer configuration. There is
* typically no use for a dmtimer clocksource if the 32 KiHz counter is
* present, except on am437x as described below.
*/
static void __init dmtimer_systimer_check_counter32k(void)
{
struct device_node *np;
if (counter_32k)
return;
np = of_find_matching_node(NULL, counter_match_table);
if (!np) {
counter_32k = -ENODEV;
return;
}
if (of_device_is_available(np))
counter_32k = 1;
else
counter_32k = -ENODEV;
of_node_put(np);
}
static const struct of_device_id dmtimer_match_table[] = {
{ .compatible = "ti,omap2420-timer", },
{ .compatible = "ti,omap3430-timer", },
{ .compatible = "ti,omap4430-timer", },
{ .compatible = "ti,omap5430-timer", },
{ .compatible = "ti,am335x-timer", },
{ .compatible = "ti,am335x-timer-1ms", },
{ .compatible = "ti,dm814-timer", },
{ .compatible = "ti,dm816-timer", },
{ /* Sentinel */ },
};
/*
 * Checks that system timers are configured not to reset and idle during
 * the generic timer-ti-dm device driver probe, and that the system timer
 * source clocks are properly configured. Also, let's not hog any DSP- or
 * PWM-capable timers unnecessarily as system timers.
*/
static bool __init dmtimer_is_preferred(struct device_node *np)
{
if (!of_device_is_available(np))
return false;
if (!of_property_read_bool(np->parent,
"ti,no-reset-on-init"))
return false;
if (!of_property_read_bool(np->parent, "ti,no-idle"))
return false;
/* Secure gptimer12 is always clocked with a fixed source */
if (!of_property_read_bool(np, "ti,timer-secure")) {
if (!of_property_read_bool(np, "assigned-clocks"))
return false;
if (!of_property_read_bool(np, "assigned-clock-parents"))
return false;
}
if (of_property_read_bool(np, "ti,timer-dsp"))
return false;
if (of_property_read_bool(np, "ti,timer-pwm"))
return false;
return true;
}
/*
 * Finds the first available usable always-on timer, and assigns it to either
 * clockevent or clocksource depending on whether the counter_32k is available
 * on the SoC.
 *
 * Some omap3 boards with an unreliable oscillator must not use the counter_32k
 * or dmtimer1 with the 32 KiHz source. Additionally, boards with an unreliable
 * oscillator should really set counter_32k as disabled, and delete the dmtimer1
 * ti,always-on property, but let's not count on it. For these quirky cases,
* we prefer using the always-on secure dmtimer12 with the internal 32 KiHz
* clock as the clocksource, and any available dmtimer as clockevent.
*
* For am437x, we are using am335x style dmtimer clocksource. It is unclear
* if this quirk handling is really needed, but let's change it separately
* based on testing as it might cause side effects.
*/
static void __init dmtimer_systimer_assign_alwon(void)
{
struct device_node *np;
u32 pa = 0;
bool quirk_unreliable_oscillator = false;
/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
if (of_machine_is_compatible("ti,omap3-beagle-ab4")) {
quirk_unreliable_oscillator = true;
counter_32k = -ENODEV;
}
/* Quirk am437x using am335x style dmtimer clocksource */
if (of_machine_is_compatible("ti,am43"))
counter_32k = -ENODEV;
for_each_matching_node(np, dmtimer_match_table) {
struct resource res;
if (!dmtimer_is_preferred(np))
continue;
if (!of_property_read_bool(np, "ti,timer-alwon"))
continue;
if (of_address_to_resource(np, 0, &res))
continue;
pa = res.start;
/* Quirky omap3 boards must use dmtimer12 */
if (quirk_unreliable_oscillator && pa == 0x48318000)
continue;
of_node_put(np);
break;
}
/* Usually no need for dmtimer clocksource if we have counter32 */
if (counter_32k >= 0) {
clockevent = pa;
clocksource = 0;
} else {
clocksource = pa;
clockevent = DMTIMER_INST_DONT_CARE;
}
}
/* Finds the first usable dmtimer, used for the don't care case */
static u32 __init dmtimer_systimer_find_first_available(void)
{
struct device_node *np;
u32 pa = 0;
for_each_matching_node(np, dmtimer_match_table) {
struct resource res;
if (!dmtimer_is_preferred(np))
continue;
if (of_address_to_resource(np, 0, &res))
continue;
if (res.start == clocksource || res.start == clockevent)
continue;
pa = res.start;
of_node_put(np);
break;
}
return pa;
}
/* Selects the best clocksource and clockevent to use */
static void __init dmtimer_systimer_select_best(void)
{
dmtimer_systimer_check_counter32k();
dmtimer_systimer_assign_alwon();
if (clockevent == DMTIMER_INST_DONT_CARE)
clockevent = dmtimer_systimer_find_first_available();
pr_debug("%s: counter_32k: %i clocksource: %08x clockevent: %08x\n",
__func__, counter_32k, clocksource, clockevent);
}
/* Interface clocks are only available on some SoCs variants */
static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
struct device_node *np,
const char *name,
unsigned long *rate)
{
struct clk *clock;
unsigned long r;
bool is_ick = false;
int error;
is_ick = !strncmp(name, "ick", 3);
clock = of_clk_get_by_name(np, name);
if ((PTR_ERR(clock) == -EINVAL) && is_ick)
return 0;
else if (IS_ERR(clock))
return PTR_ERR(clock);
error = clk_prepare_enable(clock);
if (error)
return error;
r = clk_get_rate(clock);
if (!r) {
clk_disable_unprepare(clock);
return -ENODEV;
}
if (is_ick)
t->ick = clock;
else
t->fck = clock;
*rate = r;
return 0;
}
static int __init dmtimer_systimer_setup(struct device_node *np,
struct dmtimer_systimer *t)
{
unsigned long rate;
u8 regbase;
int error;
if (!of_device_is_compatible(np->parent, "ti,sysc"))
return -EINVAL;
t->base = of_iomap(np, 0);
if (!t->base)
return -ENXIO;
/*
* Enable optional assigned-clock-parents configured at the timer
* node level. For regular device drivers, this is done automatically
* by bus related code such as platform_drv_probe().
*/
error = of_clk_set_defaults(np, false);
if (error < 0)
pr_err("%s: clock source init failed: %i\n", __func__, error);
/* For ti-sysc, we have timer clocks at the parent module level */
error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
if (error)
goto err_unmap;
t->rate = rate;
error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
if (error)
goto err_unmap;
if (dmtimer_systimer_revision1(t)) {
t->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
t->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
t->pend = _OMAP_TIMER_WRITE_PEND_OFFSET;
regbase = 0;
} else {
t->irq_stat = OMAP_TIMER_V2_IRQSTATUS;
t->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET;
regbase = OMAP_TIMER_V2_FUNC_OFFSET;
t->pend = regbase + _OMAP_TIMER_WRITE_PEND_OFFSET;
}
t->sysc = OMAP_TIMER_OCP_CFG_OFFSET;
t->load = regbase + _OMAP_TIMER_LOAD_OFFSET;
t->counter = regbase + _OMAP_TIMER_COUNTER_OFFSET;
t->ctrl = regbase + _OMAP_TIMER_CTRL_OFFSET;
t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET;
t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET;
dmtimer_systimer_reset(t);
dmtimer_systimer_enable(t);
pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base),
readl_relaxed(t->base + t->sysc));
return 0;
err_unmap:
iounmap(t->base);
return error;
}
/* Clockevent */
static struct dmtimer_clockevent *
to_dmtimer_clockevent(struct clock_event_device *clockevent)
{
return container_of(clockevent, struct dmtimer_clockevent, dev);
}
static irqreturn_t dmtimer_clockevent_interrupt(int irq, void *data)
{
struct dmtimer_clockevent *clkevt = data;
struct dmtimer_systimer *t = &clkevt->t;
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
clkevt->dev.event_handler(&clkevt->dev);
return IRQ_HANDLED;
}
static int dmtimer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
void __iomem *pend = t->base + t->pend;
while (readl_relaxed(pend) & WP_TCRR)
cpu_relax();
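	/* The timer counts up and fires on overflow, so start 'cycles' ticks below the wrap. */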
writel_relaxed(0xffffffff - cycles, t->base + t->counter);
while (readl_relaxed(pend) & WP_TCLR)
cpu_relax();
writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
return 0;
}
static int dmtimer_clockevent_shutdown(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
void __iomem *ctrl = t->base + t->ctrl;
u32 l;
l = readl_relaxed(ctrl);
if (l & OMAP_TIMER_CTRL_ST) {
l &= ~BIT(0);
writel_relaxed(l, ctrl);
/* Flush posted write */
l = readl_relaxed(ctrl);
/* Wait for functional clock period x 3.5 */
udelay(3500000 / t->rate + 1);
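		/*
		 * For example, with an assumed 32768 Hz fck this is about
		 * 107 us; with a 24 MHz fck it rounds up to 1 us.
		 */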
}
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
return 0;
}
static int dmtimer_set_periodic(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
void __iomem *pend = t->base + t->pend;
dmtimer_clockevent_shutdown(evt);
/* Looks like we need to first set the load value separately */
while (readl_relaxed(pend) & WP_TLDR)
cpu_relax();
writel_relaxed(clkevt->period, t->base + t->load);
while (readl_relaxed(pend) & WP_TCRR)
cpu_relax();
writel_relaxed(clkevt->period, t->base + t->counter);
while (readl_relaxed(pend) & WP_TCLR)
cpu_relax();
writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
t->base + t->ctrl);
return 0;
}
static void omap_clockevent_idle(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
dmtimer_systimer_disable(t);
clk_disable(t->fck);
}
static void omap_clockevent_unidle(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
int error;
error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
}
static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
struct device_node *np,
unsigned int features,
const struct cpumask *cpumask,
const char *name,
int rating)
{
struct clock_event_device *dev;
struct dmtimer_systimer *t;
int error;
t = &clkevt->t;
dev = &clkevt->dev;
/*
* We mostly use cpuidle_coupled with ARM local timers for runtime,
* so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
*/
dev->features = features;
dev->rating = rating;
dev->set_next_event = dmtimer_set_next_event;
dev->set_state_shutdown = dmtimer_clockevent_shutdown;
dev->set_state_periodic = dmtimer_set_periodic;
dev->set_state_oneshot = dmtimer_clockevent_shutdown;
dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
dev->tick_resume = dmtimer_clockevent_shutdown;
dev->cpumask = cpumask;
dev->irq = irq_of_parse_and_map(np, 0);
if (!dev->irq)
return -ENXIO;
error = dmtimer_systimer_setup(np, &clkevt->t);
if (error)
return error;
clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
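	/*
	 * The dmtimer counts up and interrupts on overflow, so the periodic
	 * reload sits rate/HZ ticks below the wrap point (e.g. an assumed
	 * 32768 Hz rate with HZ=100 gives 0xffffffff - 328).
	 */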
/*
* For clock-event timers we never read the timer counter and
* so we are not impacted by errata i103 and i767. Therefore,
	 * we can safely ignore these errata for clock-event timers.
*/
writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
IRQF_TIMER, name, clkevt);
if (error)
goto err_out_unmap;
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
name, of_property_read_bool(np, "ti,timer-alwon") ?
"always-on " : "", t->rate, np->parent);
return 0;
err_out_unmap:
iounmap(t->base);
return error;
}
static int __init dmtimer_clockevent_init(struct device_node *np)
{
struct dmtimer_clockevent *clkevt;
int error;
clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;
error = dmtimer_clkevt_init_common(clkevt, np,
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
cpu_possible_mask, "clockevent",
300);
if (error)
goto err_out_free;
clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
3, /* Timer internal resync latency */
0xffffffff);
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
clkevt->dev.suspend = omap_clockevent_idle;
clkevt->dev.resume = omap_clockevent_unidle;
}
return 0;
err_out_free:
kfree(clkevt);
return error;
}
/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
{
struct dmtimer_clockevent *clkevt;
int error;
if (!cpu_possible(cpu))
return -EINVAL;
if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
!of_property_read_bool(np->parent, "ti,no-idle"))
pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
cpumask_of(cpu), "percpu-dmtimer",
500);
if (error)
return error;
return 0;
}
/* See TRM for timer internal resynch latency */
static int omap_dmtimer_starting_cpu(unsigned int cpu)
{
struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
struct clock_event_device *dev = &clkevt->dev;
struct dmtimer_systimer *t = &clkevt->t;
clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
irq_force_affinity(dev->irq, cpumask_of(cpu));
return 0;
}
static int __init dmtimer_percpu_timer_startup(void)
{
struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
struct dmtimer_systimer *t = &clkevt->t;
if (t->sysc) {
cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
"clockevents/omap/gptimer:starting",
omap_dmtimer_starting_cpu, NULL);
}
return 0;
}
subsys_initcall(dmtimer_percpu_timer_startup);
static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
{
struct device_node *arm_timer;
arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (of_device_is_available(arm_timer)) {
pr_warn_once("ARM architected timer wrap issue i940 detected\n");
return 0;
}
if (pa == 0x4882c000) /* dra7 dmtimer15 */
return dmtimer_percpu_timer_init(np, 0);
else if (pa == 0x4882e000) /* dra7 dmtimer16 */
return dmtimer_percpu_timer_init(np, 1);
return 0;
}
/* Clocksource */
static struct dmtimer_clocksource *
to_dmtimer_clocksource(struct clocksource *cs)
{
return container_of(cs, struct dmtimer_clocksource, dev);
}
static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs)
{
struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
struct dmtimer_systimer *t = &clksrc->t;
return (u64)readl_relaxed(t->base + t->counter);
}
static void __iomem *dmtimer_sched_clock_counter;
static u64 notrace dmtimer_read_sched_clock(void)
{
return readl_relaxed(dmtimer_sched_clock_counter);
}
static void dmtimer_clocksource_suspend(struct clocksource *cs)
{
struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
struct dmtimer_systimer *t = &clksrc->t;
clksrc->loadval = readl_relaxed(t->base + t->counter);
dmtimer_systimer_disable(t);
clk_disable(t->fck);
}
static void dmtimer_clocksource_resume(struct clocksource *cs)
{
struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
struct dmtimer_systimer *t = &clksrc->t;
int error;
error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(clksrc->loadval, t->base + t->counter);
writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
t->base + t->ctrl);
}
static int __init dmtimer_clocksource_init(struct device_node *np)
{
struct dmtimer_clocksource *clksrc;
struct dmtimer_systimer *t;
struct clocksource *dev;
int error;
clksrc = kzalloc(sizeof(*clksrc), GFP_KERNEL);
if (!clksrc)
return -ENOMEM;
dev = &clksrc->dev;
t = &clksrc->t;
error = dmtimer_systimer_setup(np, t);
if (error)
goto err_out_free;
dev->name = "dmtimer";
dev->rating = 300;
dev->read = dmtimer_clocksource_read_cycles;
dev->mask = CLOCKSOURCE_MASK(32);
dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
/* Unlike for clockevent, legacy code sets suspend only for am4 */
if (of_machine_is_compatible("ti,am43")) {
dev->suspend = dmtimer_clocksource_suspend;
dev->resume = dmtimer_clocksource_resume;
}
writel_relaxed(0, t->base + t->counter);
writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
t->base + t->ctrl);
pr_info("TI gptimer clocksource: %s%pOF\n",
of_property_read_bool(np, "ti,timer-alwon") ?
"always-on " : "", np->parent);
if (!dmtimer_sched_clock_counter) {
dmtimer_sched_clock_counter = t->base + t->counter;
sched_clock_register(dmtimer_read_sched_clock, 32, t->rate);
}
if (clocksource_register_hz(dev, t->rate))
pr_err("Could not register clocksource %pOF\n", np);
return 0;
err_out_free:
kfree(clksrc);
return -ENODEV;
}
/*
 * To distinguish between a clocksource and a clockevent, we assume the device
 * tree
* has no interrupts configured for a clocksource timer.
*/
static int __init dmtimer_systimer_init(struct device_node *np)
{
struct resource res;
u32 pa;
/* One time init for the preferred timer configuration */
if (!clocksource && !clockevent)
dmtimer_systimer_select_best();
if (!clocksource && !clockevent) {
pr_err("%s: unable to detect system timers, update dtb?\n",
__func__);
return -EINVAL;
}
of_address_to_resource(np, 0, &res);
pa = (u32)res.start;
if (!pa)
return -EINVAL;
if (counter_32k <= 0 && clocksource == pa)
return dmtimer_clocksource_init(np);
if (clockevent == pa)
return dmtimer_clockevent_init(np);
if (of_machine_is_compatible("ti,dra7"))
return dmtimer_percpu_quirk_init(np, pa);
return 0;
}
TIMER_OF_DECLARE(systimer_omap2, "ti,omap2420-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_omap3, "ti,omap3430-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_omap4, "ti,omap4430-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_omap5, "ti,omap5430-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_am33x, "ti,am335x-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_am3ms, "ti,am335x-timer-1ms", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_dm814, "ti,dm814-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_dm816, "ti,dm816-timer", dmtimer_systimer_init);
|
linux-master
|
drivers/clocksource/timer-ti-dm-systimer.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cirrus Logic CLPS711X clocksource driver
*
* Copyright (C) 2014 Alexander Shiyan <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
enum {
CLPS711X_CLKSRC_CLOCKSOURCE,
CLPS711X_CLKSRC_CLOCKEVENT,
};
static void __iomem *tcd;
static u64 notrace clps711x_sched_clock_read(void)
{
return ~readw(tcd);
}
static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base)
{
unsigned long rate = clk_get_rate(clock);
tcd = base;
clocksource_mmio_init(tcd, "clps711x-clocksource", rate, 300, 16,
clocksource_mmio_readw_down);
sched_clock_register(clps711x_sched_clock_read, 16, rate);
}
static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
unsigned int irq)
{
struct clock_event_device *clkevt;
unsigned long rate;
clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;
rate = clk_get_rate(clock);
/* Set Timer prescaler */
writew(DIV_ROUND_CLOSEST(rate, HZ), base);
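	/*
	 * Illustration with an assumed 512 kHz timer clock and HZ=100: the
	 * reload value written is 5120, i.e. one periodic interrupt per jiffy.
	 */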
clkevt->name = "clps711x-clockevent";
clkevt->rating = 300;
clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
clkevt->cpumask = cpumask_of(0);
clockevents_config_and_register(clkevt, HZ, 0, 0);
return request_irq(irq, clps711x_timer_interrupt, IRQF_TIMER,
"clps711x-timer", clkevt);
}
static int __init clps711x_timer_init(struct device_node *np)
{
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
if (!base)
return -ENOMEM;
if (!irq)
return -EINVAL;
if (IS_ERR(clock))
return PTR_ERR(clock);
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
clps711x_clksrc_init(clock, base);
break;
case CLPS711X_CLKSRC_CLOCKEVENT:
return _clps711x_clkevt_init(clock, base, irq);
default:
return -EINVAL;
}
return 0;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
|
linux-master
|
drivers/clocksource/clps711x-timer.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <asm/reg_ops.h>
#include "timer-of.h"
#define PTIM_CCVR "cr<3, 14>"
#define PTIM_CTLR "cr<0, 14>"
#define PTIM_LVR "cr<6, 14>"
#define PTIM_TSR "cr<1, 14>"
static int csky_mptimer_irq;
static int csky_mptimer_set_next_event(unsigned long delta,
struct clock_event_device *ce)
{
mtcr(PTIM_LVR, delta);
return 0;
}
static int csky_mptimer_shutdown(struct clock_event_device *ce)
{
mtcr(PTIM_CTLR, 0);
return 0;
}
static int csky_mptimer_oneshot(struct clock_event_device *ce)
{
mtcr(PTIM_CTLR, 1);
return 0;
}
static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce)
{
mtcr(PTIM_CTLR, 0);
return 0;
}
static DEFINE_PER_CPU(struct timer_of, csky_to) = {
.flags = TIMER_OF_CLOCK,
.clkevt = {
.rating = 300,
.features = CLOCK_EVT_FEAT_PERCPU |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = csky_mptimer_shutdown,
.set_state_oneshot = csky_mptimer_oneshot,
.set_state_oneshot_stopped = csky_mptimer_oneshot_stopped,
.set_next_event = csky_mptimer_set_next_event,
},
};
static irqreturn_t csky_timer_interrupt(int irq, void *dev)
{
struct timer_of *to = this_cpu_ptr(&csky_to);
mtcr(PTIM_TSR, 0);
to->clkevt.event_handler(&to->clkevt);
return IRQ_HANDLED;
}
/*
* clock event for percpu
*/
static int csky_mptimer_starting_cpu(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&csky_to, cpu);
to->clkevt.cpumask = cpumask_of(cpu);
enable_percpu_irq(csky_mptimer_irq, 0);
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
2, ULONG_MAX);
return 0;
}
static int csky_mptimer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(csky_mptimer_irq);
return 0;
}
/*
* clock source
*/
static u64 notrace sched_clock_read(void)
{
return (u64)mfcr(PTIM_CCVR);
}
static u64 clksrc_read(struct clocksource *c)
{
return (u64)mfcr(PTIM_CCVR);
}
struct clocksource csky_clocksource = {
.name = "csky",
.rating = 400,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = clksrc_read,
};
static int __init csky_mptimer_init(struct device_node *np)
{
int ret, cpu, cpu_rollback;
struct timer_of *to = NULL;
/*
	 * Csky_mptimer is designed for C-SKY SMP multi-processors, and
	 * every core has its own private irq and regs for the clkevt and
	 * clksrc.
	 *
	 * The regs are accessed by the cpu instructions mfcr/mtcr instead of
	 * an mmio mapping, so no mmio address is needed in the dts, but we
	 * still need to give the clk and irq number.
	 *
	 * We use a private irq for the mptimer, and the irq number is the
	 * same for every core. So we use request_percpu_irq() in timer_of_init.
*/
csky_mptimer_irq = irq_of_parse_and_map(np, 0);
if (csky_mptimer_irq <= 0)
return -EINVAL;
ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
"csky_mp_timer", &csky_to);
if (ret)
return -EINVAL;
for_each_possible_cpu(cpu) {
to = per_cpu_ptr(&csky_to, cpu);
ret = timer_of_init(np, to);
if (ret)
goto rollback;
}
clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
sched_clock_register(sched_clock_read, 32, timer_of_rate(to));
ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
"clockevents/csky/timer:starting",
csky_mptimer_starting_cpu,
csky_mptimer_dying_cpu);
if (ret)
return -EINVAL;
return 0;
rollback:
for_each_possible_cpu(cpu_rollback) {
if (cpu_rollback == cpu)
break;
to = per_cpu_ptr(&csky_to, cpu_rollback);
timer_of_cleanup(to);
}
return -EINVAL;
}
TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init);
|
linux-master
|
drivers/clocksource/timer-mp-csky.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/clocksource/acpi_pm.c
*
* This file contains the ACPI PM based clocksource.
*
* This code was largely moved from the i386 timer_pm.c file
* which was (C) Dominik Brodowski <[email protected]> 2003
* and contained the following comments:
*
* Driver to use the Power Management Timer (PMTMR) available in some
* southbridges as primary timing source for the Linux kernel.
*
* Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
* timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
*/
#include <linux/acpi_pmtmr.h>
#include <linux/clocksource.h>
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/time.h>
/*
* The I/O port the PMTMR resides at.
* The location is detected during setup_arch(),
* in arch/i386/kernel/acpi/boot.c
*/
u32 pmtmr_ioport __read_mostly;
static inline u32 read_pmtmr(void)
{
/* mask the output to 24 bits */
return inl(pmtmr_ioport) & ACPI_PM_MASK;
}
u32 acpi_pm_read_verified(void)
{
u32 v1 = 0, v2 = 0, v3 = 0;
/*
* It has been reported that because of various broken
* chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM clock
* source is not latched, you must read it multiple
* times to ensure a safe value is read:
*/
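	/*
	 * The loop below retries whenever the three samples are not in
	 * monotonically increasing order (allowing for one 24-bit wrap),
	 * which discards a glitched middle read; v2 is returned once it is
	 * consistent with its neighbours.
	 */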
do {
v1 = read_pmtmr();
v2 = read_pmtmr();
v3 = read_pmtmr();
} while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
|| (v3 > v1 && v3 < v2)));
return v2;
}
static u64 acpi_pm_read(struct clocksource *cs)
{
return (u64)read_pmtmr();
}
static struct clocksource clocksource_acpi_pm = {
.name = "acpi_pm",
.rating = 200,
.read = acpi_pm_read,
.mask = (u64)ACPI_PM_MASK,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#ifdef CONFIG_PCI
static int acpi_pm_good;
static int __init acpi_pm_good_setup(char *__str)
{
acpi_pm_good = 1;
return 1;
}
__setup("acpi_pm_good", acpi_pm_good_setup);
static u64 acpi_pm_read_slow(struct clocksource *cs)
{
return (u64)acpi_pm_read_verified();
}
static inline void acpi_pm_need_workaround(void)
{
clocksource_acpi_pm.read = acpi_pm_read_slow;
clocksource_acpi_pm.rating = 120;
}
/*
* PIIX4 Errata:
*
* The power management timer may return improper results when read.
* Although the timer value settles properly after incrementing,
* while incrementing there is a 3 ns window every 69.8 ns where the
* timer value is indeterminate (a 4.2% chance that the data will be
* incorrect when read). As a result, the ACPI free running count up
* timer specification is violated due to erroneous reads.
*/
static void acpi_pm_check_blacklist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
/* the bug has been fixed in PIIX4M */
if (dev->revision < 3) {
pr_warn("* Found PM-Timer Bug on the chipset. Due to workarounds for a bug,\n"
"* this clock source is slow. Consider trying other clock sources\n");
acpi_pm_need_workaround();
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
acpi_pm_check_blacklist);
static void acpi_pm_check_graylist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
pr_warn("* The chipset may have PM-Timer Bug. Due to workarounds for a bug,\n"
"* this clock source is slow. If you are sure your timer does not have\n"
"* this bug, please use \"acpi_pm_good\" to disable the workaround\n");
acpi_pm_need_workaround();
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
acpi_pm_check_graylist);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
acpi_pm_check_graylist);
#endif
#ifndef CONFIG_X86_64
#include <asm/mach_timer.h>
#define PMTMR_EXPECTED_RATE \
((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10))
/*
* Some boards have the PMTMR running way too fast. We check
* the PMTMR rate against PIT channel 2 to catch these cases.
*/
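/*
 * Rough numbers: the ACPI PM timer nominally runs at 3.579545 MHz and the
 * PIT at 1.193182 MHz, so over the CALIBRATE_LATCH window the measured
 * delta is expected to land within the +/-5% band checked below.
 */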
static int verify_pmtmr_rate(void)
{
u64 value1, value2;
unsigned long count, delta;
mach_prepare_counter();
value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
mach_countup(&count);
value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
delta = (value2 - value1) & ACPI_PM_MASK;
/* Check that the PMTMR delta is within 5% of what we expect */
if (delta < (PMTMR_EXPECTED_RATE * 19) / 20 ||
delta > (PMTMR_EXPECTED_RATE * 21) / 20) {
pr_info("PM-Timer running at invalid rate: %lu%% of normal - aborting.\n",
100UL * delta / PMTMR_EXPECTED_RATE);
return -1;
}
return 0;
}
#else
#define verify_pmtmr_rate() (0)
#endif
/* Number of monotonicity checks to perform during initialization */
#define ACPI_PM_MONOTONICITY_CHECKS 10
/* Number of reads we try to get two different values */
#define ACPI_PM_READ_CHECKS 10000
static int __init init_acpi_pm_clocksource(void)
{
u64 value1, value2;
unsigned int i, j = 0;
if (!pmtmr_ioport)
return -ENODEV;
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
udelay(100 * j);
value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
if (value2 == value1)
continue;
if (value2 > value1)
break;
if ((value2 < value1) && ((value2) < 0xFFF))
break;
pr_info("PM-Timer had inconsistent results: %#llx, %#llx - aborting.\n",
value1, value2);
pmtmr_ioport = 0;
return -EINVAL;
}
if (i == ACPI_PM_READ_CHECKS) {
pr_info("PM-Timer failed consistency check (%#llx) - aborting.\n",
value1);
pmtmr_ioport = 0;
return -ENODEV;
}
}
	if (verify_pmtmr_rate() != 0) {
pmtmr_ioport = 0;
return -ENODEV;
}
if (tsc_clocksource_watchdog_disabled())
clocksource_acpi_pm.flags |= CLOCK_SOURCE_MUST_VERIFY;
return clocksource_register_hz(&clocksource_acpi_pm, PMTMR_TICKS_PER_SEC);
}
/* We use fs_initcall because we want the PCI fixups to have run
* but we still need to load before device_initcall
*/
fs_initcall(init_acpi_pm_clocksource);
/*
* Allow an override of the IOPort. Stupid BIOSes do not tell us about
* the PMTimer, but we might know where it is.
*/
static int __init parse_pmtmr(char *arg)
{
unsigned int base;
int ret;
ret = kstrtouint(arg, 16, &base);
if (ret) {
pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg);
return 1;
}
pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport,
base);
pmtmr_ioport = base;
return 1;
}
__setup("pmtmr=", parse_pmtmr);
|
linux-master
|
drivers/clocksource/acpi_pm.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2012-2013 Freescale Semiconductor, Inc.
*/
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
/*
 * Each PIT takes 0x10 bytes of register space
*/
#define PITMCR 0x00
#define PIT0_OFFSET 0x100
#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n))
#define PITLDVAL 0x00
#define PITCVAL 0x04
#define PITTCTRL 0x08
#define PITTFLG 0x0c
#define PITMCR_MDIS (0x1 << 1)
#define PITTCTRL_TEN (0x1 << 0)
#define PITTCTRL_TIE (0x1 << 1)
#define PITCTRL_CHN (0x1 << 2)
#define PITTFLG_TIF 0x1
static void __iomem *clksrc_base;
static void __iomem *clkevt_base;
static unsigned long cycle_per_jiffy;
static inline void pit_timer_enable(void)
{
__raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
}
static inline void pit_timer_disable(void)
{
__raw_writel(0, clkevt_base + PITTCTRL);
}
static inline void pit_irq_acknowledge(void)
{
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}
static u64 notrace pit_read_sched_clock(void)
{
return ~__raw_readl(clksrc_base + PITCVAL);
}
static int __init pit_clocksource_init(unsigned long rate)
{
/* set the max load value and start the clock source counter */
__raw_writel(0, clksrc_base + PITTCTRL);
__raw_writel(~0UL, clksrc_base + PITLDVAL);
__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
sched_clock_register(pit_read_sched_clock, 32, rate);
return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
300, 32, clocksource_mmio_readl_down);
}
static int pit_set_next_event(unsigned long delta,
struct clock_event_device *unused)
{
/*
	 * Setting a new value in the PITLDVAL register will not restart the
	 * timer; to abort the current cycle and start a timer period with the
	 * new value, the timer must be disabled and enabled again.
	 * PITLDVAL should be set to delta minus one, per the PIT hardware
	 * requirement.
*/
pit_timer_disable();
__raw_writel(delta - 1, clkevt_base + PITLDVAL);
pit_timer_enable();
return 0;
}
static int pit_shutdown(struct clock_event_device *evt)
{
pit_timer_disable();
return 0;
}
static int pit_set_periodic(struct clock_event_device *evt)
{
pit_set_next_event(cycle_per_jiffy, evt);
return 0;
}
static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
pit_irq_acknowledge();
/*
	 * The PIT hardware doesn't support oneshot: it generates an interrupt
	 * and reloads the counter value from PITLDVAL when PITCVAL reaches
	 * zero, then starts counting again. So software needs to disable the
	 * timer to stop the counter loop in ONESHOT mode.
*/
if (likely(clockevent_state_oneshot(evt)))
pit_timer_disable();
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct clock_event_device clockevent_pit = {
.name = "VF pit timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = pit_shutdown,
.set_state_periodic = pit_set_periodic,
.set_next_event = pit_set_next_event,
.rating = 300,
};
static int __init pit_clockevent_init(unsigned long rate, int irq)
{
__raw_writel(0, clkevt_base + PITTCTRL);
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"VF pit timer", &clockevent_pit));
clockevent_pit.cpumask = cpumask_of(0);
clockevent_pit.irq = irq;
/*
	 * The value for the LDVAL register trigger is calculated as:
	 * LDVAL trigger = (period / clock period) - 1
	 * The PIT is a 32-bit down-count timer; when the counter value
	 * reaches 0 it generates an interrupt, thus the minimal
	 * LDVAL trigger value is 1. The min_delta is therefore the minimal
	 * LDVAL trigger value + 1, and the max_delta is the full 32 bits.
*/
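	/*
	 * Illustration with assumed numbers: at a 66 MHz PIT clock and HZ=100,
	 * cycle_per_jiffy is 660,000, so pit_set_periodic() programs LDVAL
	 * with 659,999; the full 32-bit range corresponds to roughly 65 s.
	 */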
clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);
return 0;
}
static int __init pit_timer_init(struct device_node *np)
{
struct clk *pit_clk;
void __iomem *timer_base;
unsigned long clk_rate;
int irq, ret;
timer_base = of_iomap(np, 0);
if (!timer_base) {
pr_err("Failed to iomap\n");
return -ENXIO;
}
/*
* PIT0 and PIT1 can be chained to build a 64-bit timer,
* so choose PIT2 as clocksource, PIT3 as clockevent device,
 * and leave PIT0 and PIT1 free for anyone else who needs them.
*/
clksrc_base = timer_base + PITn_OFFSET(2);
clkevt_base = timer_base + PITn_OFFSET(3);
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
return -EINVAL;
pit_clk = of_clk_get(np, 0);
if (IS_ERR(pit_clk))
return PTR_ERR(pit_clk);
ret = clk_prepare_enable(pit_clk);
if (ret)
return ret;
clk_rate = clk_get_rate(pit_clk);
cycle_per_jiffy = clk_rate / (HZ);
/* enable the pit module */
__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
ret = pit_clocksource_init(clk_rate);
if (ret)
return ret;
return pit_clockevent_init(clk_rate, irq);
}
TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
|
linux-master
|
drivers/clocksource/timer-vf-pit.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic MMIO clocksource support
*/
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
struct clocksource_mmio {
void __iomem *reg;
struct clocksource clksrc;
};
static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
{
return container_of(c, struct clocksource_mmio, clksrc);
}
u64 clocksource_mmio_readl_up(struct clocksource *c)
{
return (u64)readl_relaxed(to_mmio_clksrc(c)->reg);
}
u64 clocksource_mmio_readl_down(struct clocksource *c)
{
return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
}
u64 clocksource_mmio_readw_up(struct clocksource *c)
{
return (u64)readw_relaxed(to_mmio_clksrc(c)->reg);
}
u64 clocksource_mmio_readw_down(struct clocksource *c)
{
return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
}
/**
* clocksource_mmio_init - Initialize a simple mmio based clocksource
* @base: Virtual address of the clock readout register
* @name: Name of the clocksource
* @hz: Frequency of the clocksource in Hz
* @rating: Rating of the clocksource
* @bits: Number of valid bits
* @read: One of clocksource_mmio_read*() above
*/
int __init clocksource_mmio_init(void __iomem *base, const char *name,
unsigned long hz, int rating, unsigned bits,
u64 (*read)(struct clocksource *))
{
struct clocksource_mmio *cs;
if (bits > 64 || bits < 16)
return -EINVAL;
cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->reg = base;
cs->clksrc.name = name;
cs->clksrc.rating = rating;
cs->clksrc.read = read;
cs->clksrc.mask = CLOCKSOURCE_MASK(bits);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
return clocksource_register_hz(&cs->clksrc, hz);
}
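/*
 * Usage sketch (editor's illustration; the register name CNT_REG is
 * hypothetical): a driver with a free-running 32-bit up-counter running
 * at "rate" Hz would call
 *
 *	clocksource_mmio_init(timer_base + CNT_REG, "my-timer", rate,
 *			      200, 32, clocksource_mmio_readl_up);
 *
 * Hardware that counts down instead passes clocksource_mmio_readl_down(),
 * which inverts the raw value so the registered clocksource still counts
 * upwards, as the vf-pit driver above does.
 */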
|
linux-master
|
drivers/clocksource/mmio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/clocksource/arm_arch_timer.c
*
* Copyright (C) 2011 ARM Ltd.
* All Rights Reserved
*/
#define pr_fmt(fmt) "arch_timer: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clocksource_ids.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/ptp_kvm.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
#include <clocksource/arm_arch_timer.h>
#define CNTTIDR 0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
#define CNTACR(n) (0x40 + ((n) * 4))
#define CNTACR_RPCT BIT(0)
#define CNTACR_RVCT BIT(1)
#define CNTACR_RFRQ BIT(2)
#define CNTACR_RVOFF BIT(3)
#define CNTACR_RWVT BIT(4)
#define CNTACR_RWPT BIT(5)
#define CNTPCT_LO 0x00
#define CNTVCT_LO 0x08
#define CNTFRQ 0x10
#define CNTP_CVAL_LO 0x20
#define CNTP_CTL 0x2c
#define CNTV_CVAL_LO 0x30
#define CNTV_CTL 0x3c
/*
* The minimum amount of time a generic counter is guaranteed to not roll over
* (40 years)
*/
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
static unsigned arch_timers_present __initdata;
struct arch_timer {
void __iomem *base;
struct clock_event_device evt;
};
static struct arch_timer *arch_timer_mem __ro_after_init;
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
[ARCH_TIMER_PHYS_SECURE_PPI] = "sec-phys",
[ARCH_TIMER_PHYS_NONSECURE_PPI] = "phys",
[ARCH_TIMER_VIRT_PPI] = "virt",
[ARCH_TIMER_HYP_PPI] = "hyp-phys",
[ARCH_TIMER_HYP_VIRT_PPI] = "hyp-virt",
};
static struct clock_event_device __percpu *arch_timer_evt;
static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static int __init early_evtstrm_cfg(char *buf)
{
return kstrtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
/*
* Makes an educated guess at a valid counter width based on the Generic Timer
* specification. Of note:
* 1) the system counter is at least 56 bits wide
* 2) a roll-over time of not less than 40 years
*
* See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
*/
static int arch_counter_get_width(void)
{
u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;
/* guarantee the returned width is within the valid range */
return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
}
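/*
 * Worked example (editor's illustration): for a 24 MHz system counter,
 * min_cycles = 1261440000 s * 24000000 Hz ~= 3.03e16, so
 * ilog2(min_cycles - 1) + 1 = 55, which clamp_val() raises to the
 * architectural minimum of 56 bits. Only counters faster than roughly
 * 57 MHz need more than 56 bits to cover a 40-year rollover.
 */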
/*
* Architected system timer support.
*/
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
struct clock_event_device *clk)
{
if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
writel_relaxed((u32)val, timer->base + CNTP_CTL);
break;
case ARCH_TIMER_REG_CVAL:
/*
* Not guaranteed to be atomic, so the timer
* must be disabled at this point.
*/
writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
break;
default:
BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
writel_relaxed((u32)val, timer->base + CNTV_CTL);
break;
case ARCH_TIMER_REG_CVAL:
/* Same restriction as above */
writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
break;
default:
BUILD_BUG();
}
} else {
arch_timer_reg_write_cp15(access, reg, val);
}
}
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
struct clock_event_device *clk)
{
u32 val;
if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTP_CTL);
break;
default:
BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTV_CTL);
break;
default:
BUILD_BUG();
}
} else {
val = arch_timer_reg_read_cp15(access, reg);
}
return val;
}
static noinstr u64 raw_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
}
static notrace u64 arch_counter_get_cntpct_stable(void)
{
u64 val;
preempt_disable_notrace();
val = __arch_counter_get_cntpct_stable();
preempt_enable_notrace();
return val;
}
static noinstr u64 arch_counter_get_cntpct(void)
{
return __arch_counter_get_cntpct();
}
static noinstr u64 raw_counter_get_cntvct_stable(void)
{
return __arch_counter_get_cntvct_stable();
}
static notrace u64 arch_counter_get_cntvct_stable(void)
{
u64 val;
preempt_disable_notrace();
val = __arch_counter_get_cntvct_stable();
preempt_enable_notrace();
return val;
}
static noinstr u64 arch_counter_get_cntvct(void)
{
return __arch_counter_get_cntvct();
}
/*
* Default to cp15 based access because arm64 uses this function for
* sched_clock() before DT is probed and the cp15 method is guaranteed
* to exist on arm64. arm doesn't use this before DT is probed so even
* if we don't have the cp15 accessors we won't have a problem.
*/
u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);
static u64 arch_counter_read(struct clocksource *cs)
{
return arch_timer_read_counter();
}
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
return arch_timer_read_counter();
}
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
.id = CSID_ARM_ARCH_COUNTER,
.rating = 400,
.read = arch_counter_read,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter __ro_after_init = {
.read = arch_counter_read_cc,
};
struct ate_acpi_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
* The number of retries is an arbitrary value well beyond the highest number
* of iterations the loop has been observed to take.
*/
#define __fsl_a008585_read_reg(reg) ({ \
u64 _old, _new; \
int _retries = 200; \
\
do { \
_old = read_sysreg(reg); \
_new = read_sysreg(reg); \
_retries--; \
} while (unlikely(_old != _new) && _retries); \
\
WARN_ON_ONCE(!_retries); \
_new; \
})
static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
return __fsl_a008585_read_reg(cntpct_el0);
}
static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is correct is to verify that the
 * second read is larger than the first by less than 32, so clear the
 * lower 5 bits of the difference to check whether it exceeds 32.
 * Theoretically the erratum should not occur more than twice in
 * succession when reading the system counter, but some interrupts may
 * cause more than two consecutive bad reads and trigger the warning, so
 * the number of retries is set far beyond the number of iterations the
 * loop has been observed to take.
*/
#define __hisi_161010101_read_reg(reg) ({ \
u64 _old, _new; \
int _retries = 50; \
\
do { \
_old = read_sysreg(reg); \
_new = read_sysreg(reg); \
_retries--; \
} while (unlikely((_new - _old) >> 5) && _retries); \
\
WARN_ON_ONCE(!_retries); \
_new; \
})
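/*
 * Worked example (editor's illustration): a good pair of reads might
 * return _old = 0x1000 and _new = 0x1007; the difference is 7, so
 * (_new - _old) >> 5 is zero and the value is accepted. A read hit by
 * the erratum might return _new = 0x0f00 or 0x1100 instead; the
 * difference (modulo 2^64) is then at least 32, the shifted value is
 * non-zero, and both registers are read again.
 */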
static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
return __hisi_161010101_read_reg(cntpct_el0);
}
static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
return __hisi_161010101_read_reg(cntvct_el0);
}
static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
/*
* Note that trailing spaces are required to properly match
* the OEM table information.
*/
{
.oem_id = "HISI ",
.oem_table_id = "HIP05 ",
.oem_revision = 0,
},
{
.oem_id = "HISI ",
.oem_table_id = "HIP06 ",
.oem_revision = 0,
},
{
.oem_id = "HISI ",
.oem_table_id = "HIP07 ",
.oem_revision = 0,
},
{ /* Sentinel indicating the end of the OEM array */ },
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
u64 old, new;
old = read_sysreg(cntpct_el0);
new = read_sysreg(cntpct_el0);
return (((old ^ new) >> 32) & 1) ? old : new;
}
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
u64 old, new;
old = read_sysreg(cntvct_el0);
new = read_sysreg(cntvct_el0);
return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
* The low bits of the counter registers are indeterminate while bit 10 or
* greater is rolling over. Since the counter value can jump both backward
* (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
* with all ones or all zeros in the low bits. Bound the loop by the maximum
* number of CPU cycles in 3 consecutive 24 MHz counter periods.
*/
#define __sun50i_a64_read_reg(reg) ({ \
u64 _val; \
int _retries = 150; \
\
do { \
_val = read_sysreg(reg); \
_retries--; \
} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
\
WARN_ON_ONCE(!_retries); \
_val; \
})
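/*
 * Worked example (editor's illustration): GENMASK(8, 0) covers the low
 * 9 bits. For a value ending in 0x1ff, (_val + 1) & GENMASK(8, 0) is 0;
 * for one ending in 0x000 it is 1; both are <= 1 and force a re-read.
 * Any other low-bit pattern, e.g. 0x07f, yields a result greater than 1
 * and the read is accepted.
 */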
static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
return __sun50i_a64_read_reg(cntpct_el0);
}
static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
return __sun50i_a64_read_reg(cntvct_el0);
}
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
/*
* Force the inlining of this function so that the register accesses
* can be themselves correctly inlined.
*/
static __always_inline
void erratum_set_next_event_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cval;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
if (access == ARCH_TIMER_PHYS_ACCESS) {
cval = evt + arch_counter_get_cntpct_stable();
write_sysreg(cval, cntp_cval_el0);
} else {
cval = evt + arch_counter_get_cntvct_stable();
write_sysreg(cval, cntv_cval_el0);
}
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
{
.match_type = ate_match_dt,
.id = "fsl,erratum-a008585",
.desc = "Freescale erratum a005858",
.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_phys,
.set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
{
.match_type = ate_match_dt,
.id = "hisilicon,erratum-161010101",
.desc = "HiSilicon erratum 161010101",
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_phys,
.set_next_event_virt = erratum_set_next_event_virt,
},
{
.match_type = ate_match_acpi_oem_info,
.id = hisi_161010101_oem_info,
.desc = "HiSilicon erratum 161010101",
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_phys,
.set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
{
.match_type = ate_match_local_cap_id,
.id = (void *)ARM64_WORKAROUND_858921,
.desc = "ARM erratum 858921",
.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_phys,
.set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,
.id = "allwinner,erratum-unknown1",
.desc = "Allwinner erratum UNKNOWN1",
.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_phys,
.set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
{
.match_type = ate_match_local_cap_id,
.id = (void *)ARM64_WORKAROUND_1418040,
.desc = "ARM erratum 1418040",
.disable_compat_vdso = true,
},
#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
const void *);
static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
const void *arg)
{
const struct device_node *np = arg;
return of_property_read_bool(np, wa->id);
}
static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
const void *arg)
{
return this_cpu_has_cap((uintptr_t)wa->id);
}
static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
const void *arg)
{
static const struct ate_acpi_oem_info empty_oem_info = {};
const struct ate_acpi_oem_info *info = wa->id;
const struct acpi_table_header *table = arg;
/* Iterate over the ACPI OEM info array, looking for a match */
while (memcmp(info, &empty_oem_info, sizeof(*info))) {
if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
info->oem_revision == table->oem_revision)
return true;
info++;
}
return false;
}
static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
ate_match_fn_t match_fn,
void *arg)
{
int i;
for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
if (ool_workarounds[i].match_type != type)
continue;
if (match_fn(&ool_workarounds[i], arg))
return &ool_workarounds[i];
}
return NULL;
}
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
bool local)
{
int i;
if (local) {
__this_cpu_write(timer_unstable_counter_workaround, wa);
} else {
for_each_possible_cpu(i)
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
atomic_set(&timer_unstable_counter_workaround_in_use, 1);
/*
* Don't use the vdso fastpath if errata require using the
* out-of-line counter accessor. We may change our mind pretty
* late in the game (with a per-CPU erratum, for example), so
* change both the default value and the vdso itself.
*/
if (wa->read_cntvct_el0) {
clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
vdso_default = VDSO_CLOCKMODE_NONE;
} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
clocksource_counter.vdso_clock_mode = vdso_default;
}
}
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
void *arg)
{
const struct arch_timer_erratum_workaround *wa, *__wa;
ate_match_fn_t match_fn = NULL;
bool local = false;
switch (type) {
case ate_match_dt:
match_fn = arch_timer_check_dt_erratum;
break;
case ate_match_local_cap_id:
match_fn = arch_timer_check_local_cap_erratum;
local = true;
break;
case ate_match_acpi_oem_info:
match_fn = arch_timer_check_acpi_oem_erratum;
break;
default:
WARN_ON(1);
return;
}
wa = arch_timer_iterate_errata(type, match_fn, arg);
if (!wa)
return;
__wa = __this_cpu_read(timer_unstable_counter_workaround);
if (__wa && wa != __wa)
pr_warn("Can't enable workaround for %s (clashes with %s\n)",
wa->desc, __wa->desc);
if (__wa)
return;
arch_timer_enable_workaround(wa, local);
pr_info("Enabling %s workaround for %s\n",
local ? "local" : "global", wa->desc);
}
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
return has_erratum_handler(read_cntvct_el0);
}
static bool arch_timer_counter_has_wa(void)
{
return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
struct clock_event_device *evt)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
evt->event_handler(evt);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}
static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}
static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
static __always_inline int arch_timer_shutdown(const int access,
struct clock_event_device *clk)
{
unsigned long ctrl;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
return 0;
}
static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
return arch_timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}
static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}
static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}
static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cnt;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
if (access == ARCH_TIMER_PHYS_ACCESS)
cnt = __arch_counter_get_cntpct();
else
cnt = __arch_counter_get_cntvct();
arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
{
u32 cnt_lo, cnt_hi, tmp_hi;
do {
cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
} while (cnt_hi != tmp_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
}
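/*
 * Worked example (editor's illustration): suppose the 64-bit count is
 * rolling over from 0x00000001_ffffffff to 0x00000002_00000000. A plain
 * high-then-low read could observe cnt_hi = 1 and cnt_lo = 0 and return
 * 0x00000001_00000000, nearly 4 billion ticks in the past. The loop
 * above re-reads the high word and retries whenever it changed, so the
 * low word is always paired with a consistent high word.
 */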
static __always_inline void set_next_event_mem(const int access, unsigned long evt,
struct clock_event_device *clk)
{
struct arch_timer *timer = to_arch_timer(clk);
unsigned long ctrl;
u64 cnt;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
/* Timer must be disabled before programming CVAL */
if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
else
cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
static int arch_timer_set_next_event_virt_mem(unsigned long evt,
struct clock_event_device *clk)
{
set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys_mem(unsigned long evt,
struct clock_event_device *clk)
{
set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
return 0;
}
static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
const struct midr_range broken_cval_midrs[] = {
/*
* XGene-1 implements CVAL in terms of TVAL, meaning
* that the maximum timer range is 32bit. Shame on them.
*
* Note that TVAL is signed, thus has only 31 of its
* 32 bits to express magnitude.
*/
MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
APM_CPU_PART_POTENZA)),
{},
};
if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
return CLOCKSOURCE_MASK(31);
}
#endif
return CLOCKSOURCE_MASK(arch_counter_get_width());
}
static void __arch_timer_setup(unsigned type,
struct clock_event_device *clk)
{
u64 max_delta;
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
typeof(clk->set_next_event) sne;
arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
if (arch_timer_c3stop)
clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->cpumask = cpumask_of(smp_processor_id());
clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
switch (arch_timer_uses_ppi) {
case ARCH_TIMER_VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
sne = erratum_handler(set_next_event_virt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
case ARCH_TIMER_HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
sne = erratum_handler(set_next_event_phys);
break;
default:
BUG();
}
clk->set_next_event = sne;
max_delta = __arch_timer_check_delta();
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
clk->rating = 400;
clk->cpumask = cpu_possible_mask;
if (arch_timer_mem_use_virtual) {
clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
clk->set_next_event =
arch_timer_set_next_event_virt_mem;
} else {
clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
clk->set_next_event =
arch_timer_set_next_event_phys_mem;
}
max_delta = CLOCKSOURCE_MASK(56);
}
clk->set_state_shutdown(clk);
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}
static void arch_timer_evtstrm_enable(unsigned int divider)
{
u32 cntkctl = arch_timer_get_cntkctl();
#ifdef CONFIG_ARM64
/* ECV is likely to require a large divider. Use the EVNTIS flag. */
if (cpus_have_const_cap(ARM64_HAS_ECV) && divider > 15) {
cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE;
divider -= 8;
}
#endif
divider = min(divider, 15U);
cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
/* Set the divider and enable virtual event stream */
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
arch_timer_set_evtstrm_feature();
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
static void arch_timer_configure_evtstream(void)
{
int evt_stream_div, lsb;
/*
* As the event stream can at most be generated at half the frequency
* of the counter, use half the frequency when computing the divider.
*/
evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
/*
 * Find the power of two closest to the divisor. If the bit just below
 * the lsb (last set bit, counted from 0) is also set, round up to (lsb + 1).
*/
lsb = fls(evt_stream_div) - 1;
if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
lsb++;
/* enable event stream */
arch_timer_evtstrm_enable(max(0, lsb));
}
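/*
 * Worked example (editor's illustration, assuming the usual 10 kHz
 * target in ARCH_TIMER_EVT_STREAM_FREQ): with a 24 MHz counter,
 * evt_stream_div = 24000000 / 10000 / 2 = 1200. fls(1200) - 1 = 10 and
 * bit 9 of 1200 (0x4b0) is clear, so lsb stays 10 and the event stream
 * fires on each 0->1 transition of counter bit 10, i.e. every 2048
 * ticks, about 11.7 kHz, close to the 10 kHz target.
 */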
static void arch_counter_set_user_access(void)
{
u32 cntkctl = arch_timer_get_cntkctl();
/* Disable user access to the timers and both counters */
/* Also disable virtual event stream */
cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
| ARCH_TIMER_USR_VT_ACCESS_EN
| ARCH_TIMER_USR_VCT_ACCESS_EN
| ARCH_TIMER_VIRT_EVT_EN
| ARCH_TIMER_USR_PCT_ACCESS_EN);
/*
 * Enable user access to the virtual counter if it doesn't
 * need to be worked around. The vdso may already have been
 * disabled, though.
*/
if (arch_timer_this_cpu_has_cntvct_wa())
pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
else
cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
arch_timer_set_cntkctl(cntkctl);
}
static bool arch_timer_has_nonsecure_ppi(void)
{
return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}
static u32 check_ppi_trigger(int irq)
{
u32 flags = irq_get_trigger_type(irq);
if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
pr_warn("WARNING: Please fix your firmware\n");
flags = IRQF_TRIGGER_LOW;
}
return flags;
}
static int arch_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
u32 flags;
__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
if (arch_timer_has_nonsecure_ppi()) {
flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
flags);
}
arch_counter_set_user_access();
if (evtstrm_enable)
arch_timer_configure_evtstream();
return 0;
}
static int validate_timer_rate(void)
{
if (!arch_timer_rate)
return -EINVAL;
/* Arch timer frequency < 1MHz can cause trouble */
WARN_ON(arch_timer_rate < 1000000);
return 0;
}
/*
* For historical reasons, when probing with DT we use whichever (non-zero)
* rate was probed first, and don't verify that others match. If the first node
* probed has a clock-frequency property, this overrides the HW register.
*/
static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
/* Who has more than one independent system counter? */
if (arch_timer_rate)
return;
if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
arch_timer_rate = rate;
/* Check the timer frequency. */
if (validate_timer_rate())
pr_warn("frequency not available\n");
}
static void __init arch_timer_banner(unsigned type)
{
pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
" and " : "",
type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
type & ARCH_TIMER_TYPE_CP15 ?
(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
"",
type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
type & ARCH_TIMER_TYPE_MEM ?
arch_timer_mem_use_virtual ? "virt" : "phys" :
"");
}
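/*
 * Worked example (editor's illustration): arch_timer_rate = 24000000
 * prints "24.00MHz" (24000000 / 1000000 = 24 whole MHz, and
 * (24000000 / 10000) % 100 = 0 hundredths), while 19200000 prints
 * "19.20MHz".
 */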
u32 arch_timer_get_rate(void)
{
return arch_timer_rate;
}
bool arch_timer_evtstrm_available(void)
{
/*
* We might get called from a preemptible context. This is fine
* because availability of the event stream should be always the same
* for a preemptible context and context where we might resume a task.
*/
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
static noinstr u64 arch_counter_get_cntvct_mem(void)
{
return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
}
static struct arch_timer_kvm_info arch_timer_kvm_info;
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
return &arch_timer_kvm_info;
}
static void __init arch_counter_register(unsigned type)
{
u64 (*scr)(void);
u64 start_count;
int width;
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
u64 (*rd)(void);
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
if (arch_timer_counter_has_wa()) {
rd = arch_counter_get_cntvct_stable;
scr = raw_counter_get_cntvct_stable;
} else {
rd = arch_counter_get_cntvct;
scr = arch_counter_get_cntvct;
}
} else {
if (arch_timer_counter_has_wa()) {
rd = arch_counter_get_cntpct_stable;
scr = raw_counter_get_cntpct_stable;
} else {
rd = arch_counter_get_cntpct;
scr = arch_counter_get_cntpct;
}
}
arch_timer_read_counter = rd;
clocksource_counter.vdso_clock_mode = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
scr = arch_counter_get_cntvct_mem;
}
width = arch_counter_get_width();
clocksource_counter.mask = CLOCKSOURCE_MASK(width);
cyclecounter.mask = CLOCKSOURCE_MASK(width);
if (!arch_counter_suspend_stop)
clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&arch_timer_kvm_info.timecounter,
&cyclecounter, start_count);
sched_clock_register(scr, width, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
{
pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
if (arch_timer_has_nonsecure_ppi())
disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
clk->set_state_shutdown(clk);
}
static int arch_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
arch_timer_stop(clk);
return 0;
}
#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
if (action == CPU_PM_ENTER) {
__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
if (arch_timer_have_evtstrm_feature())
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
}
static struct notifier_block arch_timer_cpu_pm_notifier = {
.notifier_call = arch_timer_cpu_pm_notify,
};
static int __init arch_timer_cpu_pm_init(void)
{
return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
static void __init arch_timer_cpu_pm_deinit(void)
{
WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}
#else
static int __init arch_timer_cpu_pm_init(void)
{
return 0;
}
static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
static int __init arch_timer_register(void)
{
int err;
int ppi;
arch_timer_evt = alloc_percpu(struct clock_event_device);
if (!arch_timer_evt) {
err = -ENOMEM;
goto out;
}
ppi = arch_timer_ppi[arch_timer_uses_ppi];
switch (arch_timer_uses_ppi) {
case ARCH_TIMER_VIRT_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_virt,
"arch_timer", arch_timer_evt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (!err && arch_timer_has_nonsecure_ppi()) {
ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (err)
free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
arch_timer_evt);
}
break;
case ARCH_TIMER_HYP_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
break;
default:
BUG();
}
if (err) {
pr_err("can't register interrupt %d (%d)\n", ppi, err);
goto out_free;
}
err = arch_timer_cpu_pm_init();
if (err)
goto out_unreg_notify;
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
"clockevents/arm/arch_timer:starting",
arch_timer_starting_cpu, arch_timer_dying_cpu);
if (err)
goto out_unreg_cpupm;
return 0;
out_unreg_cpupm:
arch_timer_cpu_pm_deinit();
out_unreg_notify:
free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
if (arch_timer_has_nonsecure_ppi())
free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
arch_timer_evt);
out_free:
free_percpu(arch_timer_evt);
out:
return err;
}
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
int ret;
irq_handler_t func;
arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
if (!arch_timer_mem)
return -ENOMEM;
arch_timer_mem->base = base;
arch_timer_mem->evt.irq = irq;
__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
if (arch_timer_mem_use_virtual)
func = arch_timer_handler_virt_mem;
else
func = arch_timer_handler_phys_mem;
ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
if (ret) {
pr_err("Failed to request mem timer irq\n");
kfree(arch_timer_mem);
arch_timer_mem = NULL;
}
return ret;
}
static const struct of_device_id arch_timer_of_match[] __initconst = {
{ .compatible = "arm,armv7-timer", },
{ .compatible = "arm,armv8-timer", },
{},
};
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
{ .compatible = "arm,armv7-timer-mem", },
{},
};
static bool __init arch_timer_needs_of_probing(void)
{
struct device_node *dn;
bool needs_probing = false;
unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
/* We have two timers, and both device-tree nodes are probed. */
if ((arch_timers_present & mask) == mask)
return false;
/*
 * Only one type of timer has been probed so far; check whether a node
 * for the other type is present in the device tree.
*/
if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
else
dn = of_find_matching_node(NULL, arch_timer_of_match);
if (dn && of_device_is_available(dn))
needs_probing = true;
of_node_put(dn);
return needs_probing;
}
static int __init arch_timer_common_init(void)
{
arch_timer_banner(arch_timers_present);
arch_counter_register(arch_timers_present);
return arch_timer_arch_init();
}
/**
* arch_timer_select_ppi() - Select suitable PPI for the current system.
*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
* that a guest can use the virtual timer instead.
*
* On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
* accesses to CNTP_*_EL1 registers are silently redirected to
* their CNTHP_*_EL2 counterparts, and use a different PPI
* number.
*
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It had better be accessible...
* For arm64 we never use the secure interrupt.
*
* Return: a suitable PPI type for the current system.
*/
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
if (is_kernel_in_hyp_mode())
return ARCH_TIMER_HYP_PPI;
if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
return ARCH_TIMER_VIRT_PPI;
if (IS_ENABLED(CONFIG_ARM64))
return ARCH_TIMER_PHYS_NONSECURE_PPI;
return ARCH_TIMER_PHYS_SECURE_PPI;
}
static void __init arch_timer_populate_kvm_info(void)
{
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
if (is_kernel_in_hyp_mode())
arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}
static int __init arch_timer_of_init(struct device_node *np)
{
int i, irq, ret;
u32 rate;
bool has_names;
if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
pr_warn("multiple nodes in dt, skipping\n");
return 0;
}
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
has_names = of_property_read_bool(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
if (has_names)
irq = of_irq_get_byname(np, arch_timer_ppi_names[i]);
else
irq = of_irq_get(np, i);
if (irq > 0)
arch_timer_ppi[i] = irq;
}
arch_timer_populate_kvm_info();
rate = arch_timer_get_cntfrq();
arch_timer_of_configure_rate(rate, np);
arch_timer_c3stop = !of_property_read_bool(np, "always-on");
/* Check for globally applicable workarounds */
arch_timer_check_ool_workaround(ate_match_dt, np);
/*
* If we cannot rely on firmware initializing the timer registers then
* we should use the physical timers instead.
*/
if (IS_ENABLED(CONFIG_ARM) &&
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
else
arch_timer_uses_ppi = arch_timer_select_ppi();
if (!arch_timer_ppi[arch_timer_uses_ppi]) {
pr_err("No interrupt available, giving up\n");
return -EINVAL;
}
/* On some systems, the counter stops ticking when in suspend. */
arch_counter_suspend_stop = of_property_read_bool(np,
"arm,no-tick-in-suspend");
ret = arch_timer_register();
if (ret)
return ret;
if (arch_timer_needs_of_probing())
return 0;
return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
void __iomem *base;
u32 rate;
base = ioremap(frame->cntbase, frame->size);
if (!base) {
pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
return 0;
}
rate = readl_relaxed(base + CNTFRQ);
iounmap(base);
return rate;
}
static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
struct arch_timer_mem_frame *frame, *best_frame = NULL;
void __iomem *cntctlbase;
u32 cnttidr;
int i;
cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
if (!cntctlbase) {
pr_err("Can't map CNTCTLBase @ %pa\n",
&timer_mem->cntctlbase);
return NULL;
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
/*
* Try to find a virtual capable frame. Otherwise fall back to a
* physical capable frame.
*/
for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
frame = &timer_mem->frame[i];
if (!frame->valid)
continue;
/* Try enabling everything, and see what sticks */
writel_relaxed(cntacr, cntctlbase + CNTACR(i));
cntacr = readl_relaxed(cntctlbase + CNTACR(i));
if ((cnttidr & CNTTIDR_VIRT(i)) &&
!(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
best_frame = frame;
arch_timer_mem_use_virtual = true;
break;
}
if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
continue;
best_frame = frame;
}
iounmap(cntctlbase);
return best_frame;
}
static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
void __iomem *base;
int ret, irq = 0;
if (arch_timer_mem_use_virtual)
irq = frame->virt_irq;
else
irq = frame->phys_irq;
if (!irq) {
pr_err("Frame missing %s irq.\n",
arch_timer_mem_use_virtual ? "virt" : "phys");
return -EINVAL;
}
if (!request_mem_region(frame->cntbase, frame->size,
"arch_mem_timer"))
return -EBUSY;
base = ioremap(frame->cntbase, frame->size);
if (!base) {
pr_err("Can't map frame's registers\n");
return -ENXIO;
}
ret = arch_timer_mem_register(base, irq);
if (ret) {
iounmap(base);
return ret;
}
arch_timers_present |= ARCH_TIMER_TYPE_MEM;
return 0;
}
static int __init arch_timer_mem_of_init(struct device_node *np)
{
struct arch_timer_mem *timer_mem;
struct arch_timer_mem_frame *frame;
struct device_node *frame_node;
struct resource res;
int ret = -EINVAL;
u32 rate;
timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
if (!timer_mem)
return -ENOMEM;
if (of_address_to_resource(np, 0, &res))
goto out;
timer_mem->cntctlbase = res.start;
timer_mem->size = resource_size(&res);
for_each_available_child_of_node(np, frame_node) {
u32 n;
struct arch_timer_mem_frame *frame;
if (of_property_read_u32(frame_node, "frame-number", &n)) {
pr_err(FW_BUG "Missing frame-number.\n");
of_node_put(frame_node);
goto out;
}
if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
ARCH_TIMER_MEM_MAX_FRAMES - 1);
of_node_put(frame_node);
goto out;
}
frame = &timer_mem->frame[n];
if (frame->valid) {
pr_err(FW_BUG "Duplicated frame-number.\n");
of_node_put(frame_node);
goto out;
}
if (of_address_to_resource(frame_node, 0, &res)) {
of_node_put(frame_node);
goto out;
}
frame->cntbase = res.start;
frame->size = resource_size(&res);
frame->virt_irq = irq_of_parse_and_map(frame_node,
ARCH_TIMER_VIRT_SPI);
frame->phys_irq = irq_of_parse_and_map(frame_node,
ARCH_TIMER_PHYS_SPI);
frame->valid = true;
}
frame = arch_timer_mem_find_best_frame(timer_mem);
if (!frame) {
pr_err("Unable to find a suitable frame in timer @ %pa\n",
&timer_mem->cntctlbase);
ret = -EINVAL;
goto out;
}
rate = arch_timer_mem_frame_get_cntfrq(frame);
arch_timer_of_configure_rate(rate, np);
ret = arch_timer_mem_frame_register(frame);
if (!ret && !arch_timer_needs_of_probing())
ret = arch_timer_common_init();
out:
kfree(timer_mem);
return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_of_init);
#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
struct arch_timer_mem_frame *frame;
u32 rate;
int i;
for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
frame = &timer_mem->frame[i];
if (!frame->valid)
continue;
rate = arch_timer_mem_frame_get_cntfrq(frame);
if (rate == arch_timer_rate)
continue;
pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
&frame->cntbase,
(unsigned long)rate, (unsigned long)arch_timer_rate);
return -EINVAL;
}
return 0;
}
static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
struct arch_timer_mem *timers, *timer;
struct arch_timer_mem_frame *frame, *best_frame = NULL;
int timer_count, i, ret = 0;
timers = kcalloc(platform_timer_count, sizeof(*timers),
GFP_KERNEL);
if (!timers)
return -ENOMEM;
ret = acpi_arch_timer_mem_init(timers, &timer_count);
if (ret || !timer_count)
goto out;
/*
* While unlikely, it's theoretically possible that none of the frames
 * in a timer expose the combination of features we want.
*/
for (i = 0; i < timer_count; i++) {
timer = &timers[i];
frame = arch_timer_mem_find_best_frame(timer);
if (!best_frame)
best_frame = frame;
ret = arch_timer_mem_verify_cntfrq(timer);
if (ret) {
pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
goto out;
}
if (!best_frame) /* implies !frame */
/*
* Only complain about missing suitable frames if we
* haven't already found one in a previous iteration.
*/
pr_err("Unable to find a suitable frame in timer @ %pa\n",
&timer->cntctlbase);
}
if (best_frame)
ret = arch_timer_mem_frame_register(best_frame);
out:
kfree(timers);
return ret;
}
/* Initialize per-processor generic timer and memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
int ret, platform_timer_count;
if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
pr_warn("already initialized, skipping\n");
return -EINVAL;
}
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
ret = acpi_gtdt_init(table, &platform_timer_count);
if (ret)
return ret;
arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
arch_timer_populate_kvm_info();
/*
* When probing via ACPI, we have no mechanism to override the sysreg
* CNTFRQ value. This *must* be correct.
*/
arch_timer_rate = arch_timer_get_cntfrq();
ret = validate_timer_rate();
if (ret) {
pr_err(FW_BUG "frequency not available.\n");
return ret;
}
arch_timer_uses_ppi = arch_timer_select_ppi();
if (!arch_timer_ppi[arch_timer_uses_ppi]) {
pr_err("No interrupt available, giving up\n");
return -EINVAL;
}
/* Always-on capability */
arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);
/* Check for globally applicable workarounds */
arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
ret = arch_timer_register();
if (ret)
return ret;
if (platform_timer_count &&
arch_timer_mem_acpi_init(platform_timer_count))
pr_err("Failed to initialize memory-mapped timer.\n");
return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
struct clocksource **cs)
{
struct arm_smccc_res hvc_res;
u32 ptp_counter;
ktime_t ktime;
if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY))
return -EOPNOTSUPP;
if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
ptp_counter = KVM_PTP_VIRT_COUNTER;
else
ptp_counter = KVM_PTP_PHYS_COUNTER;
arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
ptp_counter, &hvc_res);
if ((int)(hvc_res.a0) < 0)
return -EOPNOTSUPP;
ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1;
*ts = ktime_to_timespec64(ktime);
if (cycle)
*cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
if (cs)
*cs = &clocksource_counter;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp);
|
linux-master
|
drivers/clocksource/arm_arch_timer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Emma Mobile Timer Support - STI
*
* Copyright (C) 2012 Magnus Damm
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <linux/module.h>
enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
struct em_sti_priv {
void __iomem *base;
struct clk *clk;
struct platform_device *pdev;
unsigned int active[USER_NR];
unsigned long rate;
raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
};
#define STI_CONTROL 0x00
#define STI_COMPA_H 0x10
#define STI_COMPA_L 0x14
#define STI_COMPB_H 0x18
#define STI_COMPB_L 0x1c
#define STI_COUNT_H 0x20
#define STI_COUNT_L 0x24
#define STI_COUNT_RAW_H 0x28
#define STI_COUNT_RAW_L 0x2c
#define STI_SET_H 0x30
#define STI_SET_L 0x34
#define STI_INTSTATUS 0x40
#define STI_INTRAWSTATUS 0x44
#define STI_INTENSET 0x48
#define STI_INTENCLR 0x4c
#define STI_INTFFCLR 0x50
static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
{
return ioread32(p->base + offs);
}
static inline void em_sti_write(struct em_sti_priv *p, int offs,
unsigned long value)
{
iowrite32(value, p->base + offs);
}
static int em_sti_enable(struct em_sti_priv *p)
{
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
/* reset the counter */
em_sti_write(p, STI_SET_H, 0x40000000);
em_sti_write(p, STI_SET_L, 0x00000000);
/* mask and clear pending interrupts */
em_sti_write(p, STI_INTENCLR, 3);
em_sti_write(p, STI_INTFFCLR, 3);
/* enable updates of counter registers */
em_sti_write(p, STI_CONTROL, 1);
return 0;
}
static void em_sti_disable(struct em_sti_priv *p)
{
/* mask interrupts */
em_sti_write(p, STI_INTENCLR, 3);
/* stop clock */
clk_disable(p->clk);
}
static u64 em_sti_count(struct em_sti_priv *p)
{
u64 ticks;
unsigned long flags;
/* The STI hardware buffers the 48-bit count, but to
 * break it out into two 32-bit reads the registers
 * must be accessed in a certain order.
* Always read STI_COUNT_H before STI_COUNT_L.
*/
raw_spin_lock_irqsave(&p->lock, flags);
ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
ticks |= em_sti_read(p, STI_COUNT_L);
raw_spin_unlock_irqrestore(&p->lock, flags);
return ticks;
}
static u64 em_sti_set_next(struct em_sti_priv *p, u64 next)
{
unsigned long flags;
raw_spin_lock_irqsave(&p->lock, flags);
/* mask compare A interrupt */
em_sti_write(p, STI_INTENCLR, 1);
/* update compare A value */
em_sti_write(p, STI_COMPA_H, next >> 32);
em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
/* clear compare A interrupt source */
em_sti_write(p, STI_INTFFCLR, 1);
/* unmask compare A interrupt */
em_sti_write(p, STI_INTENSET, 1);
raw_spin_unlock_irqrestore(&p->lock, flags);
return next;
}
static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
{
struct em_sti_priv *p = dev_id;
p->ced.event_handler(&p->ced);
return IRQ_HANDLED;
}
static int em_sti_start(struct em_sti_priv *p, unsigned int user)
{
unsigned long flags;
int used_before;
int ret = 0;
raw_spin_lock_irqsave(&p->lock, flags);
used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
if (!used_before)
ret = em_sti_enable(p);
if (!ret)
p->active[user] = 1;
raw_spin_unlock_irqrestore(&p->lock, flags);
return ret;
}
static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
{
unsigned long flags;
int used_before, used_after;
raw_spin_lock_irqsave(&p->lock, flags);
used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
p->active[user] = 0;
used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
if (used_before && !used_after)
em_sti_disable(p);
raw_spin_unlock_irqrestore(&p->lock, flags);
}
static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
{
return container_of(cs, struct em_sti_priv, cs);
}
static u64 em_sti_clocksource_read(struct clocksource *cs)
{
return em_sti_count(cs_to_em_sti(cs));
}
static int em_sti_clocksource_enable(struct clocksource *cs)
{
struct em_sti_priv *p = cs_to_em_sti(cs);
return em_sti_start(p, USER_CLOCKSOURCE);
}
static void em_sti_clocksource_disable(struct clocksource *cs)
{
em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
}
static void em_sti_clocksource_resume(struct clocksource *cs)
{
em_sti_clocksource_enable(cs);
}
static int em_sti_register_clocksource(struct em_sti_priv *p)
{
struct clocksource *cs = &p->cs;
cs->name = dev_name(&p->pdev->dev);
cs->rating = 200;
cs->read = em_sti_clocksource_read;
cs->enable = em_sti_clocksource_enable;
cs->disable = em_sti_clocksource_disable;
cs->suspend = em_sti_clocksource_disable;
cs->resume = em_sti_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(48);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register_hz(cs, p->rate);
return 0;
}
static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
{
return container_of(ced, struct em_sti_priv, ced);
}
static int em_sti_clock_event_shutdown(struct clock_event_device *ced)
{
struct em_sti_priv *p = ced_to_em_sti(ced);
em_sti_stop(p, USER_CLOCKEVENT);
return 0;
}
static int em_sti_clock_event_set_oneshot(struct clock_event_device *ced)
{
struct em_sti_priv *p = ced_to_em_sti(ced);
dev_info(&p->pdev->dev, "used for oneshot clock events\n");
em_sti_start(p, USER_CLOCKEVENT);
return 0;
}
static int em_sti_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct em_sti_priv *p = ced_to_em_sti(ced);
u64 next;
int safe;
next = em_sti_set_next(p, em_sti_count(p) + delta);
safe = em_sti_count(p) < (next - 1);
return !safe;
}
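/*
 * Editor's note (added): em_sti_set_next() returns the absolute 48-bit
 * compare value that was programmed. If the counter has already reached
 * next - 1 by the time it is read back, the compare may have been
 * missed, so "safe" is false and the function returns nonzero; the
 * clockevents core treats that as a failed programming attempt and may
 * reprogram with the minimum delta.
 */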
static void em_sti_register_clockevent(struct em_sti_priv *p)
{
struct clock_event_device *ced = &p->ced;
ced->name = dev_name(&p->pdev->dev);
ced->features = CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
ced->cpumask = cpu_possible_mask;
ced->set_next_event = em_sti_clock_event_next;
ced->set_state_shutdown = em_sti_clock_event_shutdown;
ced->set_state_oneshot = em_sti_clock_event_set_oneshot;
dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_config_and_register(ced, p->rate, 2, 0xffffffff);
}
static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
int irq, ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (p == NULL)
return -ENOMEM;
p->pdev = pdev;
platform_set_drvdata(pdev, p);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* map memory, let base point to the STI instance */
p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base))
return PTR_ERR(p->base);
ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
dev_name(&pdev->dev), p);
if (ret) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
return ret;
}
/* get hold of clock */
p->clk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
return PTR_ERR(p->clk);
}
ret = clk_prepare(p->clk);
if (ret < 0) {
dev_err(&pdev->dev, "cannot prepare clock\n");
return ret;
}
ret = clk_enable(p->clk);
if (ret < 0) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
clk_unprepare(p->clk);
return ret;
}
p->rate = clk_get_rate(p->clk);
clk_disable(p->clk);
raw_spin_lock_init(&p->lock);
em_sti_register_clockevent(p);
em_sti_register_clocksource(p);
return 0;
}
static const struct of_device_id em_sti_dt_ids[] = {
{ .compatible = "renesas,em-sti", },
{},
};
MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
static struct platform_driver em_sti_device_driver = {
.probe = em_sti_probe,
.driver = {
.name = "em_sti",
.of_match_table = em_sti_dt_ids,
.suppress_bind_attrs = true,
}
};
static int __init em_sti_init(void)
{
return platform_driver_register(&em_sti_device_driver);
}
static void __exit em_sti_exit(void)
{
platform_driver_unregister(&em_sti_device_driver);
}
subsys_initcall(em_sti_init);
module_exit(em_sti_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
|
linux-master
|
drivers/clocksource/em_sti.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/clocksource.h>
extern struct of_device_id __timer_of_table[];
static const struct of_device_id __timer_of_table_sentinel
__used __section("__timer_of_table_end");
void __init timer_probe(void)
{
struct device_node *np;
const struct of_device_id *match;
of_init_fn_1_ret init_func_ret;
unsigned timers = 0;
int ret;
for_each_matching_node_and_match(np, __timer_of_table, &match) {
if (!of_device_is_available(np))
continue;
init_func_ret = match->data;
ret = init_func_ret(np);
if (ret) {
if (ret != -EPROBE_DEFER)
pr_err("Failed to initialize '%pOF': %d\n", np,
ret);
continue;
}
timers++;
}
timers += acpi_probe_device_table(timer);
if (!timers)
pr_crit("%s: no matching timers found\n", __func__);
}
|
linux-master
|
drivers/clocksource/timer-probe.c
|
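timer_probe() above walks the __timer_of_table section that TIMER_OF_DECLARE() entries are placed in. The following is a hedged sketch of how a driver hooks into that table; the compatible string and function name are hypothetical.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/of.h>

static int __init my_board_timer_init(struct device_node *np)
{
	/*
	 * Map registers, request the IRQ and register the clocksource/
	 * clockevent here. Returning a negative errno makes timer_probe()
	 * print an error (except for -EPROBE_DEFER) and skip this node.
	 */
	return 0;
}
TIMER_OF_DECLARE(my_board_timer, "vendor,my-board-timer", my_board_timer_init);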
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) Copyright 2009 Intel Corporation
* Author: Jacob Pan ([email protected])
*
* Shared with ARM platforms, Jamie Iles, Picochip 2011
*
* Support for the Synopsys DesignWare APB Timers.
*/
#include <linux/dw_apb_timer.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#define APBT_MIN_PERIOD 4
#define APBT_MIN_DELTA_USEC 200
#define APBTMR_N_LOAD_COUNT 0x00
#define APBTMR_N_CURRENT_VALUE 0x04
#define APBTMR_N_CONTROL 0x08
#define APBTMR_N_EOI 0x0c
#define APBTMR_N_INT_STATUS 0x10
#define APBTMRS_INT_STATUS 0xa0
#define APBTMRS_EOI 0xa4
#define APBTMRS_RAW_INT_STATUS 0xa8
#define APBTMRS_COMP_VERSION 0xac
#define APBTMR_CONTROL_ENABLE (1 << 0)
/* 1: periodic, 0: free running. */
#define APBTMR_CONTROL_MODE_PERIODIC (1 << 1)
#define APBTMR_CONTROL_INT (1 << 2)
static inline struct dw_apb_clock_event_device *
ced_to_dw_apb_ced(struct clock_event_device *evt)
{
return container_of(evt, struct dw_apb_clock_event_device, ced);
}
static inline struct dw_apb_clocksource *
clocksource_to_dw_apb_clocksource(struct clocksource *cs)
{
return container_of(cs, struct dw_apb_clocksource, cs);
}
static inline u32 apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
{
return readl(timer->base + offs);
}
static inline void apbt_writel(struct dw_apb_timer *timer, u32 val,
unsigned long offs)
{
writel(val, timer->base + offs);
}
static inline u32 apbt_readl_relaxed(struct dw_apb_timer *timer, unsigned long offs)
{
return readl_relaxed(timer->base + offs);
}
static inline void apbt_writel_relaxed(struct dw_apb_timer *timer, u32 val,
unsigned long offs)
{
writel_relaxed(val, timer->base + offs);
}
static void apbt_disable_int(struct dw_apb_timer *timer)
{
u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
ctrl |= APBTMR_CONTROL_INT;
apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}
/**
* dw_apb_clockevent_pause() - stop the clock_event_device from running
*
* @dw_ced: The APB clock to stop generating events.
*/
void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
{
disable_irq(dw_ced->timer.irq);
apbt_disable_int(&dw_ced->timer);
}
static void apbt_eoi(struct dw_apb_timer *timer)
{
apbt_readl_relaxed(timer, APBTMR_N_EOI);
}
static irqreturn_t dw_apb_clockevent_irq(int irq, void *data)
{
struct clock_event_device *evt = data;
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
if (!evt->event_handler) {
pr_info("Spurious APBT timer interrupt %d\n", irq);
return IRQ_NONE;
}
if (dw_ced->eoi)
dw_ced->eoi(&dw_ced->timer);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void apbt_enable_int(struct dw_apb_timer *timer)
{
u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
/* clear pending intr */
apbt_readl(timer, APBTMR_N_EOI);
ctrl &= ~APBTMR_CONTROL_INT;
apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}
static int apbt_shutdown(struct clock_event_device *evt)
{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
u32 ctrl;
pr_debug("%s CPU %d state=shutdown\n", __func__,
cpumask_first(evt->cpumask));
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
return 0;
}
static int apbt_set_oneshot(struct clock_event_device *evt)
{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
u32 ctrl;
pr_debug("%s CPU %d state=oneshot\n", __func__,
cpumask_first(evt->cpumask));
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
/*
 * Set free-running mode. In this mode the timer reloads the maximum
 * timeout, which gives enough time (about 3 min at a 25 MHz clock) to
 * program the next event, thereby emulating one-shot mode.
*/
ctrl &= ~APBTMR_CONTROL_ENABLE;
ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/* write again to set free running mode */
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/*
* DW APB p. 46, load counter with all 1s before starting free
* running mode.
*/
apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
ctrl &= ~APBTMR_CONTROL_INT;
ctrl |= APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
return 0;
}
static int apbt_set_periodic(struct clock_event_device *evt)
{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
unsigned long period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
u32 ctrl;
pr_debug("%s CPU %d state=periodic\n", __func__,
cpumask_first(evt->cpumask));
ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/*
 * Per DW APB databook p. 46, the timer has to be disabled before
 * loading the counter, otherwise it may cause a sync problem.
*/
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
udelay(1);
pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
ctrl |= APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
return 0;
}
static int apbt_resume(struct clock_event_device *evt)
{
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
pr_debug("%s CPU %d state=resume\n", __func__,
cpumask_first(evt->cpumask));
apbt_enable_int(&dw_ced->timer);
return 0;
}
static int apbt_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u32 ctrl;
struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
/* Disable timer */
ctrl = apbt_readl_relaxed(&dw_ced->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
/* write new count */
apbt_writel_relaxed(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT);
ctrl |= APBTMR_CONTROL_ENABLE;
apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
return 0;
}
/**
* dw_apb_clockevent_init() - use an APB timer as a clock_event_device
*
* @cpu: The CPU the events will be targeted at or -1 if CPU affiliation
* isn't required.
* @name: The name used for the timer and the IRQ for it.
* @rating: The rating to give the timer.
* @base: I/O base for the timer registers.
* @irq: The interrupt number to use for the timer.
* @freq: The frequency that the timer counts at.
*
 * This creates a clock_event_device for use with the generic clock layer
 * but does not start and register it. That should be done with
 * dw_apb_clockevent_register() as the next step. If this is the first time
 * it has been called for a timer then the IRQ will be requested; if not, it
 * will just be re-enabled, which allows CPU hotplug to avoid repeatedly
 * requesting and releasing the IRQ.
*/
struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
void __iomem *base, int irq, unsigned long freq)
{
struct dw_apb_clock_event_device *dw_ced =
kzalloc(sizeof(*dw_ced), GFP_KERNEL);
int err;
if (!dw_ced)
return NULL;
dw_ced->timer.base = base;
dw_ced->timer.irq = irq;
dw_ced->timer.freq = freq;
clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD);
dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff,
&dw_ced->ced);
dw_ced->ced.max_delta_ticks = 0x7fffffff;
dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
dw_ced->ced.min_delta_ticks = 5000;
dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
dw_ced->ced.set_state_shutdown = apbt_shutdown;
dw_ced->ced.set_state_periodic = apbt_set_periodic;
dw_ced->ced.set_state_oneshot = apbt_set_oneshot;
dw_ced->ced.set_state_oneshot_stopped = apbt_shutdown;
dw_ced->ced.tick_resume = apbt_resume;
dw_ced->ced.set_next_event = apbt_next_event;
dw_ced->ced.irq = dw_ced->timer.irq;
dw_ced->ced.rating = rating;
dw_ced->ced.name = name;
dw_ced->eoi = apbt_eoi;
err = request_irq(irq, dw_apb_clockevent_irq,
IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
dw_ced->ced.name, &dw_ced->ced);
if (err) {
pr_err("failed to request timer irq\n");
kfree(dw_ced);
dw_ced = NULL;
}
return dw_ced;
}
/**
* dw_apb_clockevent_resume() - resume a clock that has been paused.
*
* @dw_ced: The APB clock to resume.
*/
void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
{
enable_irq(dw_ced->timer.irq);
}
/**
* dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ.
*
* @dw_ced: The APB clock to stop generating the events.
*/
void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced)
{
free_irq(dw_ced->timer.irq, &dw_ced->ced);
}
/**
* dw_apb_clockevent_register() - register the clock with the generic layer
*
* @dw_ced: The APB clock to register as a clock_event_device.
*/
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced)
{
apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL);
clockevents_register_device(&dw_ced->ced);
apbt_enable_int(&dw_ced->timer);
}
/**
* dw_apb_clocksource_start() - start the clocksource counting.
*
* @dw_cs: The clocksource to start.
*
* This is used to start the clocksource before registration and can be used
* to enable calibration of timers.
*/
void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
{
/*
 * Start counting down from 0xffff_ffff. This is done by toggling the
 * enable bit and then loading the initial load count with ~0.
*/
u32 ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL);
ctrl &= ~APBTMR_CONTROL_ENABLE;
apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
apbt_writel(&dw_cs->timer, ~0, APBTMR_N_LOAD_COUNT);
/* enable, mask interrupt */
ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
/* read it once to get cached counter value initialized */
dw_apb_clocksource_read(dw_cs);
}
static u64 __apbt_read_clocksource(struct clocksource *cs)
{
u32 current_count;
struct dw_apb_clocksource *dw_cs =
clocksource_to_dw_apb_clocksource(cs);
current_count = apbt_readl_relaxed(&dw_cs->timer,
APBTMR_N_CURRENT_VALUE);
return (u64)~current_count;
}
static void apbt_restart_clocksource(struct clocksource *cs)
{
struct dw_apb_clocksource *dw_cs =
clocksource_to_dw_apb_clocksource(cs);
dw_apb_clocksource_start(dw_cs);
}
/**
* dw_apb_clocksource_init() - use an APB timer as a clocksource.
*
* @rating: The rating to give the clocksource.
* @name: The name for the clocksource.
* @base: The I/O base for the timer registers.
* @freq: The frequency that the timer counts at.
*
* This creates a clocksource using an APB timer but does not yet register it
* with the clocksource system. This should be done with
* dw_apb_clocksource_register() as the next step.
*/
struct dw_apb_clocksource *
dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
unsigned long freq)
{
struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL);
if (!dw_cs)
return NULL;
dw_cs->timer.base = base;
dw_cs->timer.freq = freq;
dw_cs->cs.name = name;
dw_cs->cs.rating = rating;
dw_cs->cs.read = __apbt_read_clocksource;
dw_cs->cs.mask = CLOCKSOURCE_MASK(32);
dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
dw_cs->cs.resume = apbt_restart_clocksource;
return dw_cs;
}
/**
* dw_apb_clocksource_register() - register the APB clocksource.
*
* @dw_cs: The clocksource to register.
*/
void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
{
clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq);
}
/**
* dw_apb_clocksource_read() - read the current value of a clocksource.
*
* @dw_cs: The clocksource to read.
*/
u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
{
return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
}
|
linux-master
|
drivers/clocksource/dw_apb_timer.c
|
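The dw_apb_timer.c helpers above are intended to be driven by a separate init front end (dw_apb_timer_of.c in the tree). The sketch below shows roughly how a caller might wire them together; the base addresses, IRQ number and frequency are placeholders that a real caller would obtain from firmware, and the names are invented.

#include <linux/dw_apb_timer.h>
#include <linux/init.h>

static void __iomem *clkevt_base, *clksrc_base;	/* placeholders */
static int timer_irq;
static unsigned long timer_freq;

static void __init my_dw_apb_timer_setup(void)
{
	struct dw_apb_clock_event_device *ced;
	struct dw_apb_clocksource *cs;

	/* Clockevent on the first timer, not tied to a particular CPU. */
	ced = dw_apb_clockevent_init(-1, "my_apb_clkevt", 300,
				     clkevt_base, timer_irq, timer_freq);
	if (ced)
		dw_apb_clockevent_register(ced);

	/* Free-running clocksource on the second timer. */
	cs = dw_apb_clocksource_init(300, "my_apb_clksrc",
				     clksrc_base, timer_freq);
	if (cs) {
		dw_apb_clocksource_start(cs);
		dw_apb_clocksource_register(cs);
	}
}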
// SPDX-License-Identifier: GPL-2.0
/*
* Clocksource driver for NXP LPC32xx/18xx/43xx timer
*
* Copyright (C) 2015 Joachim Eastwood <[email protected]>
*
* Based on:
* time-efm32 Copyright (C) 2013 Pengutronix
* mach-lpc32xx/timer.c Copyright (C) 2009 - 2010 NXP Semiconductors
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#define LPC32XX_TIMER_IR 0x000
#define LPC32XX_TIMER_IR_MR0INT BIT(0)
#define LPC32XX_TIMER_TCR 0x004
#define LPC32XX_TIMER_TCR_CEN BIT(0)
#define LPC32XX_TIMER_TCR_CRST BIT(1)
#define LPC32XX_TIMER_TC 0x008
#define LPC32XX_TIMER_PR 0x00c
#define LPC32XX_TIMER_MCR 0x014
#define LPC32XX_TIMER_MCR_MR0I BIT(0)
#define LPC32XX_TIMER_MCR_MR0R BIT(1)
#define LPC32XX_TIMER_MCR_MR0S BIT(2)
#define LPC32XX_TIMER_MR0 0x018
#define LPC32XX_TIMER_CTCR 0x070
struct lpc32xx_clock_event_ddata {
struct clock_event_device evtdev;
void __iomem *base;
u32 ticks_per_jiffy;
};
/* Needed for the sched clock */
static void __iomem *clocksource_timer_counter;
static u64 notrace lpc32xx_read_sched_clock(void)
{
return readl(clocksource_timer_counter);
}
static unsigned long lpc32xx_delay_timer_read(void)
{
return readl(clocksource_timer_counter);
}
static struct delay_timer lpc32xx_delay_timer = {
.read_current_timer = lpc32xx_delay_timer_read,
};
static int lpc32xx_clkevt_next_event(unsigned long delta,
struct clock_event_device *evtdev)
{
struct lpc32xx_clock_event_ddata *ddata =
container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
/*
 * Place the timer in reset and program the delta into match
 * channel 0 (MR0). When the timer counter matches the value in
 * the MR0 register, an interrupt is triggered.
 * After setup the timer is released from reset and enabled.
*/
writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0);
writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
return 0;
}
static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev)
{
struct lpc32xx_clock_event_ddata *ddata =
container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
/* Disable the timer */
writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);
return 0;
}
static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev)
{
struct lpc32xx_clock_event_ddata *ddata =
container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
/*
* When using oneshot, we must also disable the timer
* to wait for the first call to set_next_event().
*/
writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);
/* Enable interrupt, reset on match and stop on match (MCR). */
writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR);
return 0;
}
static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev)
{
struct lpc32xx_clock_event_ddata *ddata =
container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
/* Enable interrupt and reset on match. */
writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R,
ddata->base + LPC32XX_TIMER_MCR);
/*
* Place timer in reset and program the delta in the match
* channel 0 (MR0).
*/
writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0);
writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
return 0;
}
static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
{
struct lpc32xx_clock_event_ddata *ddata = dev_id;
/* Clear match on channel 0 */
writel_relaxed(LPC32XX_TIMER_IR_MR0INT, ddata->base + LPC32XX_TIMER_IR);
ddata->evtdev.event_handler(&ddata->evtdev);
return IRQ_HANDLED;
}
static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = {
.evtdev = {
.name = "lpc3220 clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.set_next_event = lpc32xx_clkevt_next_event,
.set_state_shutdown = lpc32xx_clkevt_shutdown,
.set_state_oneshot = lpc32xx_clkevt_oneshot,
.set_state_periodic = lpc32xx_clkevt_periodic,
},
};
static int __init lpc32xx_clocksource_init(struct device_node *np)
{
void __iomem *base;
unsigned long rate;
struct clk *clk;
int ret;
clk = of_clk_get_by_name(np, "timerclk");
if (IS_ERR(clk)) {
pr_err("clock get failed (%ld)\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("clock enable failed (%d)\n", ret);
goto err_clk_enable;
}
base = of_iomap(np, 0);
if (!base) {
pr_err("unable to map registers\n");
ret = -EADDRNOTAVAIL;
goto err_iomap;
}
/*
* Disable and reset timer then set it to free running timer
* mode (CTCR) with no prescaler (PR) or match operations (MCR).
* After setup the timer is released from reset and enabled.
*/
writel_relaxed(LPC32XX_TIMER_TCR_CRST, base + LPC32XX_TIMER_TCR);
writel_relaxed(0, base + LPC32XX_TIMER_PR);
writel_relaxed(0, base + LPC32XX_TIMER_MCR);
writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
writel_relaxed(LPC32XX_TIMER_TCR_CEN, base + LPC32XX_TIMER_TCR);
rate = clk_get_rate(clk);
ret = clocksource_mmio_init(base + LPC32XX_TIMER_TC, "lpc3220 timer",
rate, 300, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("failed to init clocksource (%d)\n", ret);
goto err_clocksource_init;
}
clocksource_timer_counter = base + LPC32XX_TIMER_TC;
lpc32xx_delay_timer.freq = rate;
register_current_timer_delay(&lpc32xx_delay_timer);
sched_clock_register(lpc32xx_read_sched_clock, 32, rate);
return 0;
err_clocksource_init:
iounmap(base);
err_iomap:
clk_disable_unprepare(clk);
err_clk_enable:
clk_put(clk);
return ret;
}
static int __init lpc32xx_clockevent_init(struct device_node *np)
{
void __iomem *base;
unsigned long rate;
struct clk *clk;
int ret, irq;
clk = of_clk_get_by_name(np, "timerclk");
if (IS_ERR(clk)) {
pr_err("clock get failed (%ld)\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("clock enable failed (%d)\n", ret);
goto err_clk_enable;
}
base = of_iomap(np, 0);
if (!base) {
pr_err("unable to map registers\n");
ret = -EADDRNOTAVAIL;
goto err_iomap;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("get irq failed\n");
ret = -ENOENT;
goto err_irq;
}
/*
* Disable timer and clear any pending interrupt (IR) on match
* channel 0 (MR0). Clear the prescaler as it's not used.
*/
writel_relaxed(0, base + LPC32XX_TIMER_TCR);
writel_relaxed(0, base + LPC32XX_TIMER_PR);
writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR);
rate = clk_get_rate(clk);
lpc32xx_clk_event_ddata.base = base;
lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev,
rate, 1, -1);
ret = request_irq(irq, lpc32xx_clock_event_handler,
IRQF_TIMER | IRQF_IRQPOLL, "lpc3220 clockevent",
&lpc32xx_clk_event_ddata);
if (ret) {
pr_err("request irq failed\n");
goto err_irq;
}
return 0;
err_irq:
iounmap(base);
err_iomap:
clk_disable_unprepare(clk);
err_clk_enable:
clk_put(clk);
return ret;
}
/*
 * This function ensures that we end up with exactly one clocksource and
 * one clock_event_device.
*/
static int __init lpc32xx_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
int ret = 0;
if (!has_clocksource) {
ret = lpc32xx_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
return 0;
}
}
if (!has_clockevent) {
ret = lpc32xx_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
return 0;
}
}
return ret;
}
TIMER_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
|
linux-master
|
drivers/clocksource/timer-lpc32xx.c
|
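As in the LPC32xx driver above, a simple free-running MMIO counter is commonly exported both as a clocksource via clocksource_mmio_init() and as the sched_clock() source. The fragment below is a reduced, hypothetical sketch of those two registrations; my_counter and the name string are invented.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *my_counter;	/* hypothetical 32-bit up-counter */

static u64 notrace my_read_sched_clock(void)
{
	return readl_relaxed(my_counter);
}

static int __init my_counter_register(unsigned long rate)
{
	int ret;

	/* Expose the counter as a rating-300, 32-bit clocksource. */
	ret = clocksource_mmio_init(my_counter, "my-counter", rate,
				    300, 32, clocksource_mmio_readl_up);
	if (ret)
		return ret;

	/* And as the scheduler clock. */
	sched_clock_register(my_read_sched_clock, 32, rate);
	return 0;
}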
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - MTU2
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif
struct sh_mtu2_device;
struct sh_mtu2_channel {
struct sh_mtu2_device *mtu;
unsigned int index;
void __iomem *base;
struct clock_event_device ced;
};
struct sh_mtu2_device {
struct platform_device *pdev;
void __iomem *mapbase;
struct clk *clk;
raw_spinlock_t lock; /* Protect the shared registers */
struct sh_mtu2_channel *channels;
unsigned int num_channels;
bool has_clockevent;
};
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR 4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */
#define TCR_CCLR_NONE (0 << 5)
#define TCR_CCLR_TGRA (1 << 5)
#define TCR_CCLR_TGRB (2 << 5)
#define TCR_CCLR_SYNC (3 << 5)
#define TCR_CCLR_TGRC (5 << 5)
#define TCR_CCLR_TGRD (6 << 5)
#define TCR_CCLR_MASK (7 << 5)
#define TCR_CKEG_RISING (0 << 3)
#define TCR_CKEG_FALLING (1 << 3)
#define TCR_CKEG_BOTH (2 << 3)
#define TCR_CKEG_MASK (3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1 (0 << 0)
#define TCR_TPSC_P4 (1 << 0)
#define TCR_TPSC_P16 (2 << 0)
#define TCR_TPSC_P64 (3 << 0)
#define TCR_TPSC_CH0_TCLKA (4 << 0)
#define TCR_TPSC_CH0_TCLKB (5 << 0)
#define TCR_TPSC_CH0_TCLKC (6 << 0)
#define TCR_TPSC_CH0_TCLKD (7 << 0)
#define TCR_TPSC_CH1_TCLKA (4 << 0)
#define TCR_TPSC_CH1_TCLKB (5 << 0)
#define TCR_TPSC_CH1_P256 (6 << 0)
#define TCR_TPSC_CH1_TCNT2 (7 << 0)
#define TCR_TPSC_CH2_TCLKA (4 << 0)
#define TCR_TPSC_CH2_TCLKB (5 << 0)
#define TCR_TPSC_CH2_TCLKC (6 << 0)
#define TCR_TPSC_CH2_P1024 (7 << 0)
#define TCR_TPSC_CH34_P256 (4 << 0)
#define TCR_TPSC_CH34_P1024 (5 << 0)
#define TCR_TPSC_CH34_TCLKA (6 << 0)
#define TCR_TPSC_CH34_TCLKB (7 << 0)
#define TCR_TPSC_MASK (7 << 0)
#define TMDR_BFE (1 << 6)
#define TMDR_BFB (1 << 5)
#define TMDR_BFA (1 << 4)
#define TMDR_MD_NORMAL (0 << 0)
#define TMDR_MD_PWM_1 (2 << 0)
#define TMDR_MD_PWM_2 (3 << 0)
#define TMDR_MD_PHASE_1 (4 << 0)
#define TMDR_MD_PHASE_2 (5 << 0)
#define TMDR_MD_PHASE_3 (6 << 0)
#define TMDR_MD_PHASE_4 (7 << 0)
#define TMDR_MD_PWM_SYNC (8 << 0)
#define TMDR_MD_PWM_COMP_CREST (13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH (14 << 0)
#define TMDR_MD_PWM_COMP_BOTH (15 << 0)
#define TMDR_MD_MASK (15 << 0)
#define TIOC_IOCH(n) ((n) << 4)
#define TIOC_IOCL(n) ((n) << 0)
#define TIOR_OC_RETAIN (0 << 0)
#define TIOR_OC_0_CLEAR (1 << 0)
#define TIOR_OC_0_SET (2 << 0)
#define TIOR_OC_0_TOGGLE (3 << 0)
#define TIOR_OC_1_CLEAR (5 << 0)
#define TIOR_OC_1_SET (6 << 0)
#define TIOR_OC_1_TOGGLE (7 << 0)
#define TIOR_IC_RISING (8 << 0)
#define TIOR_IC_FALLING (9 << 0)
#define TIOR_IC_BOTH (10 << 0)
#define TIOR_IC_TCNT (12 << 0)
#define TIOR_MASK (15 << 0)
#define TIER_TTGE (1 << 7)
#define TIER_TTGE2 (1 << 6)
#define TIER_TCIEU (1 << 5)
#define TIER_TCIEV (1 << 4)
#define TIER_TGIED (1 << 3)
#define TIER_TGIEC (1 << 2)
#define TIER_TGIEB (1 << 1)
#define TIER_TGIEA (1 << 0)
#define TSR_TCFD (1 << 7)
#define TSR_TCFU (1 << 5)
#define TSR_TCFV (1 << 4)
#define TSR_TGFD (1 << 3)
#define TSR_TGFC (1 << 2)
#define TSR_TGFB (1 << 1)
#define TSR_TGFA (1 << 0)
static unsigned long mtu2_reg_offs[] = {
[TCR] = 0,
[TMDR] = 1,
[TIOR] = 2,
[TIER] = 4,
[TSR] = 5,
[TCNT] = 6,
[TGR] = 8,
};
static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
{
unsigned long offs;
if (reg_nr == TSTR)
return ioread8(ch->mtu->mapbase + 0x280);
offs = mtu2_reg_offs[reg_nr];
if ((reg_nr == TCNT) || (reg_nr == TGR))
return ioread16(ch->base + offs);
else
return ioread8(ch->base + offs);
}
static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
unsigned long value)
{
unsigned long offs;
if (reg_nr == TSTR)
return iowrite8(value, ch->mtu->mapbase + 0x280);
offs = mtu2_reg_offs[reg_nr];
if ((reg_nr == TCNT) || (reg_nr == TGR))
iowrite16(value, ch->base + offs);
else
iowrite8(value, ch->base + offs);
}
static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
{
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->mtu->lock, flags);
value = sh_mtu2_read(ch, TSTR);
if (start)
value |= 1 << ch->index;
else
value &= ~(1 << ch->index);
sh_mtu2_write(ch, TSTR, value);
raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
}
static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
{
unsigned long periodic;
unsigned long rate;
int ret;
pm_runtime_get_sync(&ch->mtu->pdev->dev);
dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
/* enable clock */
ret = clk_enable(ch->mtu->clk);
if (ret) {
dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
ch->index);
return ret;
}
/* make sure channel is disabled */
sh_mtu2_start_stop_ch(ch, 0);
rate = clk_get_rate(ch->mtu->clk) / 64;
periodic = (rate + HZ/2) / HZ;
/*
* "Periodic Counter Operation"
* Clear on TGRA compare match, divide clock by 64.
*/
sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
TIOC_IOCL(TIOR_OC_0_CLEAR));
sh_mtu2_write(ch, TGR, periodic);
sh_mtu2_write(ch, TCNT, 0);
sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
sh_mtu2_write(ch, TIER, TIER_TGIEA);
/* enable channel */
sh_mtu2_start_stop_ch(ch, 1);
return 0;
}
static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
/* disable channel */
sh_mtu2_start_stop_ch(ch, 0);
/* stop clock */
clk_disable(ch->mtu->clk);
dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
pm_runtime_put(&ch->mtu->pdev->dev);
}
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
struct sh_mtu2_channel *ch = dev_id;
/* acknowledge interrupt */
sh_mtu2_read(ch, TSR);
sh_mtu2_write(ch, TSR, ~TSR_TGFA);
/* notify clockevent layer */
ch->ced.event_handler(&ch->ced);
return IRQ_HANDLED;
}
static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
return container_of(ced, struct sh_mtu2_channel, ced);
}
static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
if (clockevent_state_periodic(ced))
sh_mtu2_disable(ch);
return 0;
}
static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
{
struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
if (clockevent_state_periodic(ced))
sh_mtu2_disable(ch);
dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
ch->index);
sh_mtu2_enable(ch);
return 0;
}
static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
dev_pm_genpd_suspend(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
dev_pm_genpd_resume(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}
static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
const char *name)
{
struct clock_event_device *ced = &ch->ced;
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->rating = 200;
ced->cpumask = cpu_possible_mask;
ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
ced->suspend = sh_mtu2_clock_event_suspend;
ced->resume = sh_mtu2_clock_event_resume;
dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
ch->index);
clockevents_register_device(ced);
}
static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
{
ch->mtu->has_clockevent = true;
sh_mtu2_register_clockevent(ch, name);
return 0;
}
static const unsigned int sh_mtu2_channel_offsets[] = {
0x300, 0x380, 0x000,
};
static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
struct sh_mtu2_device *mtu)
{
char name[6];
int irq;
int ret;
ch->mtu = mtu;
sprintf(name, "tgi%ua", index);
irq = platform_get_irq_byname(mtu->pdev, name);
if (irq < 0) {
/* Skip channels with no declared interrupt. */
return 0;
}
ret = request_irq(irq, sh_mtu2_interrupt,
IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
dev_name(&ch->mtu->pdev->dev), ch);
if (ret) {
dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
index, irq);
return ret;
}
ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
ch->index = index;
return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
}
static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
{
struct resource *res;
res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
}
mtu->mapbase = ioremap(res->start, resource_size(res));
if (mtu->mapbase == NULL)
return -ENXIO;
return 0;
}
static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
struct platform_device *pdev)
{
unsigned int i;
int ret;
mtu->pdev = pdev;
raw_spin_lock_init(&mtu->lock);
/* Get hold of clock. */
mtu->clk = clk_get(&mtu->pdev->dev, "fck");
if (IS_ERR(mtu->clk)) {
dev_err(&mtu->pdev->dev, "cannot get clock\n");
return PTR_ERR(mtu->clk);
}
ret = clk_prepare(mtu->clk);
if (ret < 0)
goto err_clk_put;
/* Map the memory resource. */
ret = sh_mtu2_map_memory(mtu);
if (ret < 0) {
dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
goto err_clk_unprepare;
}
/* Allocate and setup the channels. */
ret = platform_irq_count(pdev);
if (ret < 0)
goto err_unmap;
mtu->num_channels = min_t(unsigned int, ret,
ARRAY_SIZE(sh_mtu2_channel_offsets));
mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
GFP_KERNEL);
if (mtu->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;
}
for (i = 0; i < mtu->num_channels; ++i) {
ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
if (ret < 0)
goto err_unmap;
}
platform_set_drvdata(pdev, mtu);
return 0;
err_unmap:
kfree(mtu->channels);
iounmap(mtu->mapbase);
err_clk_unprepare:
clk_unprepare(mtu->clk);
err_clk_put:
clk_put(mtu->clk);
return ret;
}
static int sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
int ret;
if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
if (mtu) {
dev_info(&pdev->dev, "kept as earlytimer\n");
goto out;
}
mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
if (mtu == NULL)
return -ENOMEM;
ret = sh_mtu2_setup(mtu, pdev);
if (ret) {
kfree(mtu);
pm_runtime_idle(&pdev->dev);
return ret;
}
if (is_sh_early_platform_device(pdev))
return 0;
out:
if (mtu->has_clockevent)
pm_runtime_irq_safe(&pdev->dev);
else
pm_runtime_idle(&pdev->dev);
return 0;
}
static const struct platform_device_id sh_mtu2_id_table[] = {
{ "sh-mtu2", 0 },
{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
{ .compatible = "renesas,mtu2" },
{ }
};
MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
static struct platform_driver sh_mtu2_device_driver = {
.probe = sh_mtu2_probe,
.driver = {
.name = "sh_mtu2",
.of_match_table = of_match_ptr(sh_mtu2_of_table),
.suppress_bind_attrs = true,
},
.id_table = sh_mtu2_id_table,
};
static int __init sh_mtu2_init(void)
{
return platform_driver_register(&sh_mtu2_device_driver);
}
static void __exit sh_mtu2_exit(void)
{
platform_driver_unregister(&sh_mtu2_device_driver);
}
#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_mtu2_device_driver);
#endif
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
|
linux-master
|
drivers/clocksource/sh_mtu2.c
|
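sh_mtu2_start_stop_ch() above illustrates a recurring point in these drivers: a start/stop register shared by several channels must be updated under a raw spinlock with interrupts disabled, because the read-modify-write can race with other channels and with the timer interrupt itself. A stand-alone sketch of the same idea follows; all names are hypothetical.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(my_tstr_lock);
static void __iomem *my_tstr;		/* shared start/stop register */

static void my_channel_start_stop(unsigned int index, bool start)
{
	unsigned long flags;
	u8 value;

	/* Serialize the read-modify-write against other channels and IRQs. */
	raw_spin_lock_irqsave(&my_tstr_lock, flags);
	value = ioread8(my_tstr);
	if (start)
		value |= BIT(index);
	else
		value &= ~BIT(index);
	iowrite8(value, my_tstr);
	raw_spin_unlock_irqrestore(&my_tstr_lock, flags);
}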
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <asm/delay.h>
#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0004
#define TIMER_ENABLE 0x0008
#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1)
#define TIMER_ENABLE_EN BIT(0)
#define TIMER_CLEAR 0x000C
#define DGT_CLK_CTL 0x10
#define DGT_CLK_CTL_DIV_4 0x3
#define TIMER_STS_GPT0_CLR_PEND BIT(10)
#define GPT_HZ 32768
static void __iomem *event_base;
static void __iomem *sts_base;
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
/* Stop the timer tick */
if (clockevent_state_oneshot(evt)) {
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
ctrl &= ~TIMER_ENABLE_EN;
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
}
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int msm_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
ctrl &= ~TIMER_ENABLE_EN;
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
writel_relaxed(ctrl, event_base + TIMER_CLEAR);
writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
if (sts_base)
while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND)
cpu_relax();
writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
return 0;
}
static int msm_timer_shutdown(struct clock_event_device *evt)
{
u32 ctrl;
ctrl = readl_relaxed(event_base + TIMER_ENABLE);
ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
return 0;
}
static struct clock_event_device __percpu *msm_evt;
static void __iomem *source_base;
static notrace u64 msm_read_timer_count(struct clocksource *cs)
{
return readl_relaxed(source_base + TIMER_COUNT_VAL);
}
static struct clocksource msm_clocksource = {
.name = "dg_timer",
.rating = 300,
.read = msm_read_timer_count,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int msm_timer_irq;
static int msm_timer_has_ppi;
static int msm_local_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
int err;
evt->irq = msm_timer_irq;
evt->name = "msm_timer";
evt->features = CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 200;
evt->set_state_shutdown = msm_timer_shutdown;
evt->set_state_oneshot = msm_timer_shutdown;
evt->tick_resume = msm_timer_shutdown;
evt->set_next_event = msm_timer_set_next_event;
evt->cpumask = cpumask_of(cpu);
clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);
if (msm_timer_has_ppi) {
enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
} else {
err = request_irq(evt->irq, msm_timer_interrupt,
IRQF_TIMER | IRQF_NOBALANCING |
IRQF_TRIGGER_RISING, "gp_timer", evt);
if (err)
pr_err("request_irq failed\n");
}
return 0;
}
static int msm_local_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
static u64 notrace msm_sched_clock_read(void)
{
return msm_clocksource.read(&msm_clocksource);
}
static unsigned long msm_read_current_timer(void)
{
return msm_clocksource.read(&msm_clocksource);
}
static struct delay_timer msm_delay_timer = {
.read_current_timer = msm_read_current_timer,
};
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
bool percpu)
{
struct clocksource *cs = &msm_clocksource;
int res = 0;
msm_timer_irq = irq;
msm_timer_has_ppi = percpu;
msm_evt = alloc_percpu(struct clock_event_device);
if (!msm_evt) {
pr_err("memory allocation failed for clockevents\n");
goto err;
}
if (percpu)
res = request_percpu_irq(irq, msm_timer_interrupt,
"gp_timer", msm_evt);
if (res) {
pr_err("request_percpu_irq failed\n");
} else {
/* Install and invoke hotplug callbacks */
res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
"clockevents/qcom/timer:starting",
msm_local_timer_starting_cpu,
msm_local_timer_dying_cpu);
if (res) {
free_percpu_irq(irq, msm_evt);
goto err;
}
}
err:
writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
res = clocksource_register_hz(cs, dgt_hz);
if (res)
pr_err("clocksource_register failed\n");
sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
msm_delay_timer.freq = dgt_hz;
register_current_timer_delay(&msm_delay_timer);
return res;
}
static int __init msm_dt_timer_init(struct device_node *np)
{
u32 freq;
int irq, ret;
struct resource res;
u32 percpu_offset;
void __iomem *base;
void __iomem *cpu0_base;
base = of_iomap(np, 0);
if (!base) {
pr_err("Failed to map event base\n");
return -ENXIO;
}
/* We use GPT0 for the clockevent */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
pr_err("Can't get irq\n");
return -EINVAL;
}
/* We use CPU0's DGT for the clocksource */
if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
percpu_offset = 0;
ret = of_address_to_resource(np, 0, &res);
if (ret) {
pr_err("Failed to parse DGT resource\n");
return ret;
}
cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
if (!cpu0_base) {
pr_err("Failed to map source base\n");
return -EINVAL;
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
pr_err("Unknown frequency\n");
return -EINVAL;
}
event_base = base + 0x4;
sts_base = base + 0x88;
source_base = cpu0_base + 0x24;
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
return msm_timer_init(freq, 32, irq, !!percpu_offset);
}
TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
|
linux-master
|
drivers/clocksource/timer-qcom.c
|
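The Qualcomm driver above handles two interrupt flavours: a per-CPU PPI requested once with request_percpu_irq() and later enabled per CPU with enable_percpu_irq(), or a regular IRQ requested with request_irq(). The following is a reduced, hypothetical sketch of the request side only; in the percpu case the IRQ core hands each CPU its own slot of the per-CPU dev_id in the handler.

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

static struct clock_event_device __percpu *my_evt;	/* hypothetical */
static int my_irq;

static irqreturn_t my_timer_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static int __init my_request_timer_irq(bool percpu)
{
	if (percpu)
		/* One request covers all CPUs; dev_id is the per-CPU base. */
		return request_percpu_irq(my_irq, my_timer_isr,
					  "my_timer", my_evt);

	/* Otherwise a normal IRQ aimed at one CPU's clockevent. */
	return request_irq(my_irq, my_timer_isr,
			   IRQF_TIMER | IRQF_NOBALANCING,
			   "my_timer", per_cpu_ptr(my_evt, 0));
}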
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2014 ARM Limited
*/
#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#define SYS_24MHZ 0x05c
static void __iomem *versatile_sys_24mhz;
static u64 notrace versatile_sys_24mhz_read(void)
{
return readl(versatile_sys_24mhz);
}
static int __init versatile_sched_clock_init(struct device_node *node)
{
void __iomem *base = of_iomap(node, 0);
of_node_clear_flag(node, OF_POPULATED);
if (!base)
return -ENXIO;
versatile_sys_24mhz = base + SYS_24MHZ;
sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
return 0;
}
TIMER_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
versatile_sched_clock_init);
TIMER_OF_DECLARE(versatile, "arm,versatile-sysreg",
versatile_sched_clock_init);
|
linux-master
|
drivers/clocksource/timer-versatile.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Faraday Technology FTTMR010 timer driver
* Copyright (C) 2017 Linus Walleij <[email protected]>
*
* Based on a rewrite of arch/arm/mach-gemini/timer.c:
* Copyright (C) 2001-2006 Storlink, Corp.
* Copyright (C) 2008-2009 Paulius Zaleckas <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
/*
* Register definitions common for all the timer variants.
*/
#define TIMER1_COUNT (0x00)
#define TIMER1_LOAD (0x04)
#define TIMER1_MATCH1 (0x08)
#define TIMER1_MATCH2 (0x0c)
#define TIMER2_COUNT (0x10)
#define TIMER2_LOAD (0x14)
#define TIMER2_MATCH1 (0x18)
#define TIMER2_MATCH2 (0x1c)
#define TIMER3_COUNT (0x20)
#define TIMER3_LOAD (0x24)
#define TIMER3_MATCH1 (0x28)
#define TIMER3_MATCH2 (0x2c)
#define TIMER_CR (0x30)
/*
* Control register set to clear for ast2600 only.
*/
#define AST2600_TIMER_CR_CLR (0x3c)
/*
* Control register (TMC30) bit fields for fttmr010/gemini/moxart timers.
*/
#define TIMER_1_CR_ENABLE BIT(0)
#define TIMER_1_CR_CLOCK BIT(1)
#define TIMER_1_CR_INT BIT(2)
#define TIMER_2_CR_ENABLE BIT(3)
#define TIMER_2_CR_CLOCK BIT(4)
#define TIMER_2_CR_INT BIT(5)
#define TIMER_3_CR_ENABLE BIT(6)
#define TIMER_3_CR_CLOCK BIT(7)
#define TIMER_3_CR_INT BIT(8)
#define TIMER_1_CR_UPDOWN BIT(9)
#define TIMER_2_CR_UPDOWN BIT(10)
#define TIMER_3_CR_UPDOWN BIT(11)
/*
* Control register (TMC30) bit fields for aspeed ast2400/ast2500 timers.
 * The Aspeed timers move bits around in the control register and lack
 * bits for setting the timer to count upwards.
*/
#define TIMER_1_CR_ASPEED_ENABLE BIT(0)
#define TIMER_1_CR_ASPEED_CLOCK BIT(1)
#define TIMER_1_CR_ASPEED_INT BIT(2)
#define TIMER_2_CR_ASPEED_ENABLE BIT(4)
#define TIMER_2_CR_ASPEED_CLOCK BIT(5)
#define TIMER_2_CR_ASPEED_INT BIT(6)
#define TIMER_3_CR_ASPEED_ENABLE BIT(8)
#define TIMER_3_CR_ASPEED_CLOCK BIT(9)
#define TIMER_3_CR_ASPEED_INT BIT(10)
/*
* Interrupt status/mask register definitions for fttmr010/gemini/moxart
* timers.
 * These registers don't exist and are not needed on Aspeed timers
 * because:
 * - the Aspeed timer overflow interrupt is controlled by bits in the
 *   Control Register (TMC30).
 * - Aspeed timers always generate an interrupt when either one of the
 *   Match registers equals the Status register.
*/
#define TIMER_INTR_STATE (0x34)
#define TIMER_INTR_MASK (0x38)
#define TIMER_1_INT_MATCH1 BIT(0)
#define TIMER_1_INT_MATCH2 BIT(1)
#define TIMER_1_INT_OVERFLOW BIT(2)
#define TIMER_2_INT_MATCH1 BIT(3)
#define TIMER_2_INT_MATCH2 BIT(4)
#define TIMER_2_INT_OVERFLOW BIT(5)
#define TIMER_3_INT_MATCH1 BIT(6)
#define TIMER_3_INT_MATCH2 BIT(7)
#define TIMER_3_INT_OVERFLOW BIT(8)
#define TIMER_INT_ALL_MASK 0x1ff
struct fttmr010 {
void __iomem *base;
unsigned int tick_rate;
bool is_aspeed;
u32 t1_enable_val;
struct clock_event_device clkevt;
int (*timer_shutdown)(struct clock_event_device *evt);
#ifdef CONFIG_ARM
struct delay_timer delay_timer;
#endif
};
/*
* A local singleton used by sched_clock and delay timer reads, which are
* fast and stateless
*/
static struct fttmr010 *local_fttmr;
static inline struct fttmr010 *to_fttmr010(struct clock_event_device *evt)
{
return container_of(evt, struct fttmr010, clkevt);
}
static unsigned long fttmr010_read_current_timer_up(void)
{
return readl(local_fttmr->base + TIMER2_COUNT);
}
static unsigned long fttmr010_read_current_timer_down(void)
{
return ~readl(local_fttmr->base + TIMER2_COUNT);
}
static u64 notrace fttmr010_read_sched_clock_up(void)
{
return fttmr010_read_current_timer_up();
}
static u64 notrace fttmr010_read_sched_clock_down(void)
{
return fttmr010_read_current_timer_down();
}
static int fttmr010_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
struct fttmr010 *fttmr010 = to_fttmr010(evt);
u32 cr;
/* Stop */
fttmr010->timer_shutdown(evt);
if (fttmr010->is_aspeed) {
/*
* ASPEED Timer Controller will load TIMER1_LOAD register
* into TIMER1_COUNT register when the timer is re-enabled.
*/
writel(cycles, fttmr010->base + TIMER1_LOAD);
} else {
/* Setup the match register forward in time */
cr = readl(fttmr010->base + TIMER1_COUNT);
writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
}
/* Start */
cr = readl(fttmr010->base + TIMER_CR);
cr |= fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR);
return 0;
}
static int ast2600_timer_shutdown(struct clock_event_device *evt)
{
struct fttmr010 *fttmr010 = to_fttmr010(evt);
/* Stop */
writel(fttmr010->t1_enable_val, fttmr010->base + AST2600_TIMER_CR_CLR);
return 0;
}
static int fttmr010_timer_shutdown(struct clock_event_device *evt)
{
struct fttmr010 *fttmr010 = to_fttmr010(evt);
u32 cr;
/* Stop */
cr = readl(fttmr010->base + TIMER_CR);
cr &= ~fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR);
return 0;
}
static int fttmr010_timer_set_oneshot(struct clock_event_device *evt)
{
struct fttmr010 *fttmr010 = to_fttmr010(evt);
u32 cr;
/* Stop */
fttmr010->timer_shutdown(evt);
/* Setup counter start from 0 or ~0 */
writel(0, fttmr010->base + TIMER1_COUNT);
if (fttmr010->is_aspeed) {
writel(~0, fttmr010->base + TIMER1_LOAD);
} else {
writel(0, fttmr010->base + TIMER1_LOAD);
/* Enable interrupt */
cr = readl(fttmr010->base + TIMER_INTR_MASK);
cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
cr |= TIMER_1_INT_MATCH1;
writel(cr, fttmr010->base + TIMER_INTR_MASK);
}
return 0;
}
static int fttmr010_timer_set_periodic(struct clock_event_device *evt)
{
struct fttmr010 *fttmr010 = to_fttmr010(evt);
u32 period = DIV_ROUND_CLOSEST(fttmr010->tick_rate, HZ);
u32 cr;
/* Stop */
fttmr010->timer_shutdown(evt);
/* Setup timer to fire at 1/HZ intervals. */
if (fttmr010->is_aspeed) {
writel(period, fttmr010->base + TIMER1_LOAD);
} else {
cr = 0xffffffff - (period - 1);
writel(cr, fttmr010->base + TIMER1_COUNT);
writel(cr, fttmr010->base + TIMER1_LOAD);
/* Enable interrupt on overflow */
cr = readl(fttmr010->base + TIMER_INTR_MASK);
cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2);
cr |= TIMER_1_INT_OVERFLOW;
writel(cr, fttmr010->base + TIMER_INTR_MASK);
}
/* Start the timer */
cr = readl(fttmr010->base + TIMER_CR);
cr |= fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR);
return 0;
}
/*
* IRQ handler for the timer
*/
static irqreturn_t fttmr010_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static irqreturn_t ast2600_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
struct fttmr010 *fttmr010 = to_fttmr010(evt);
writel(0x1, fttmr010->base + TIMER_INTR_STATE);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init fttmr010_common_init(struct device_node *np,
bool is_aspeed, bool is_ast2600)
{
struct fttmr010 *fttmr010;
int irq;
struct clk *clk;
int ret;
u32 val;
/*
* These implementations require a clock reference.
 * FIXME: we currently only support clocking from PCLK;
 * clocking from EXTCLK is not supported by the driver.
*/
clk = of_clk_get_by_name(np, "PCLK");
if (IS_ERR(clk)) {
pr_err("could not get PCLK\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("failed to enable PCLK\n");
return ret;
}
fttmr010 = kzalloc(sizeof(*fttmr010), GFP_KERNEL);
if (!fttmr010) {
ret = -ENOMEM;
goto out_disable_clock;
}
fttmr010->tick_rate = clk_get_rate(clk);
fttmr010->base = of_iomap(np, 0);
if (!fttmr010->base) {
pr_err("Can't remap registers\n");
ret = -ENXIO;
goto out_free;
}
/* IRQ for timer 1 */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_err("Can't parse IRQ\n");
ret = -EINVAL;
goto out_unmap;
}
/*
* The Aspeed timers move bits around in the control register.
*/
if (is_aspeed) {
fttmr010->t1_enable_val = TIMER_1_CR_ASPEED_ENABLE |
TIMER_1_CR_ASPEED_INT;
fttmr010->is_aspeed = true;
} else {
fttmr010->t1_enable_val = TIMER_1_CR_ENABLE | TIMER_1_CR_INT;
/*
* Reset the interrupt mask and status
*/
writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK);
writel(0, fttmr010->base + TIMER_INTR_STATE);
}
/*
* Enable timer 1 count up, timer 2 count up, except on Aspeed,
* where everything just counts down.
*/
if (is_aspeed)
val = TIMER_2_CR_ASPEED_ENABLE;
else {
val = TIMER_2_CR_ENABLE | TIMER_1_CR_UPDOWN |
TIMER_2_CR_UPDOWN;
}
writel(val, fttmr010->base + TIMER_CR);
/*
* Setup free-running clocksource timer (interrupts
* disabled.)
*/
local_fttmr = fttmr010;
writel(0, fttmr010->base + TIMER2_COUNT);
writel(0, fttmr010->base + TIMER2_MATCH1);
writel(0, fttmr010->base + TIMER2_MATCH2);
if (fttmr010->is_aspeed) {
writel(~0, fttmr010->base + TIMER2_LOAD);
clocksource_mmio_init(fttmr010->base + TIMER2_COUNT,
"FTTMR010-TIMER2",
fttmr010->tick_rate,
300, 32, clocksource_mmio_readl_down);
sched_clock_register(fttmr010_read_sched_clock_down, 32,
fttmr010->tick_rate);
} else {
writel(0, fttmr010->base + TIMER2_LOAD);
clocksource_mmio_init(fttmr010->base + TIMER2_COUNT,
"FTTMR010-TIMER2",
fttmr010->tick_rate,
300, 32, clocksource_mmio_readl_up);
sched_clock_register(fttmr010_read_sched_clock_up, 32,
fttmr010->tick_rate);
}
/*
* Setup clockevent timer (interrupt-driven) on timer 1.
*/
writel(0, fttmr010->base + TIMER1_COUNT);
writel(0, fttmr010->base + TIMER1_LOAD);
writel(0, fttmr010->base + TIMER1_MATCH1);
writel(0, fttmr010->base + TIMER1_MATCH2);
if (is_ast2600) {
fttmr010->timer_shutdown = ast2600_timer_shutdown;
ret = request_irq(irq, ast2600_timer_interrupt,
IRQF_TIMER, "FTTMR010-TIMER1",
&fttmr010->clkevt);
} else {
fttmr010->timer_shutdown = fttmr010_timer_shutdown;
ret = request_irq(irq, fttmr010_timer_interrupt,
IRQF_TIMER, "FTTMR010-TIMER1",
&fttmr010->clkevt);
}
if (ret) {
pr_err("FTTMR010-TIMER1 no IRQ\n");
goto out_unmap;
}
fttmr010->clkevt.name = "FTTMR010-TIMER1";
/* Reasonably fast and accurate clock event */
fttmr010->clkevt.rating = 300;
fttmr010->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
fttmr010->clkevt.set_next_event = fttmr010_timer_set_next_event;
fttmr010->clkevt.set_state_shutdown = fttmr010->timer_shutdown;
fttmr010->clkevt.set_state_periodic = fttmr010_timer_set_periodic;
fttmr010->clkevt.set_state_oneshot = fttmr010_timer_set_oneshot;
fttmr010->clkevt.tick_resume = fttmr010->timer_shutdown;
fttmr010->clkevt.cpumask = cpumask_of(0);
fttmr010->clkevt.irq = irq;
clockevents_config_and_register(&fttmr010->clkevt,
fttmr010->tick_rate,
1, 0xffffffff);
#ifdef CONFIG_ARM
/* Also use this timer for delays */
if (fttmr010->is_aspeed)
fttmr010->delay_timer.read_current_timer =
fttmr010_read_current_timer_down;
else
fttmr010->delay_timer.read_current_timer =
fttmr010_read_current_timer_up;
fttmr010->delay_timer.freq = fttmr010->tick_rate;
register_current_timer_delay(&fttmr010->delay_timer);
#endif
return 0;
out_unmap:
iounmap(fttmr010->base);
out_free:
kfree(fttmr010);
out_disable_clock:
clk_disable_unprepare(clk);
return ret;
}
static __init int ast2600_timer_init(struct device_node *np)
{
return fttmr010_common_init(np, true, true);
}
static __init int aspeed_timer_init(struct device_node *np)
{
return fttmr010_common_init(np, true, false);
}
static __init int fttmr010_timer_init(struct device_node *np)
{
return fttmr010_common_init(np, false, false);
}
TIMER_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_init);
TIMER_OF_DECLARE(gemini, "cortina,gemini-timer", fttmr010_timer_init);
TIMER_OF_DECLARE(moxart, "moxa,moxart-timer", fttmr010_timer_init);
TIMER_OF_DECLARE(ast2400, "aspeed,ast2400-timer", aspeed_timer_init);
TIMER_OF_DECLARE(ast2500, "aspeed,ast2500-timer", aspeed_timer_init);
TIMER_OF_DECLARE(ast2600, "aspeed,ast2600-timer", ast2600_timer_init);
|
linux-master
|
drivers/clocksource/timer-fttmr010.c
|
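Several of the ARM drivers above (fttmr010, LPC32xx, Qualcomm) also feed their free-running counter to the udelay() machinery through register_current_timer_delay(). The sketch below shows that registration in isolation, assuming a hypothetical 32-bit up-counting register my_counter; struct delay_timer and register_current_timer_delay() are ARM-specific, hence the CONFIG_ARM guard.

#ifdef CONFIG_ARM
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *my_counter;	/* hypothetical up-counter */

static unsigned long my_read_current_timer(void)
{
	return readl(my_counter);
}

static struct delay_timer my_delay_timer = {
	.read_current_timer = my_read_current_timer,
};

static void __init my_register_delay_timer(unsigned long rate)
{
	/* Calibrated delay loops now read the hardware counter directly. */
	my_delay_timer.freq = rate;
	register_current_timer_delay(&my_delay_timer);
}
#endif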
// SPDX-License-Identifier: GPL-2.0-only
/**
* timer-ti-32k.c - OMAP2 32k Timer Support
*
* Copyright (C) 2009 Nokia Corporation
*
* Update to use new clocksource/clockevent layers
* Author: Kevin Hilman, MontaVista Software, Inc. <[email protected]>
* Copyright (C) 2007 MontaVista Software, Inc.
*
* Original driver:
* Copyright (C) 2005 Nokia Corporation
* Author: Paul Mundt <[email protected]>
* Juha Yrjölä <[email protected]>
* OMAP Dual-mode timer framework support by Timo Teras
*
* Some parts based off of TI's 24xx code:
*
* Copyright (C) 2004-2009 Texas Instruments, Inc.
*
* Roughly modelled after the OMAP1 MPU timer code.
* Added OMAP4 support - Santosh Shilimkar <[email protected]>
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/sched_clock.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/of_address.h>
/*
 * 32KHz clocksource ... always available, on pretty much all chips except
* OMAP 730 and 1510. Other timers could be used as clocksources, with
* higher resolution in free-running counter modes (e.g. 12 MHz xtal),
* but systems won't necessarily want to spend resources that way.
*/
#define OMAP2_32KSYNCNT_REV_OFF 0x0
#define OMAP2_32KSYNCNT_REV_SCHEME (0x3 << 30)
#define OMAP2_32KSYNCNT_CR_OFF_LOW 0x10
#define OMAP2_32KSYNCNT_CR_OFF_HIGH 0x30
struct ti_32k {
void __iomem *base;
void __iomem *counter;
struct clocksource cs;
};
static inline struct ti_32k *to_ti_32k(struct clocksource *cs)
{
return container_of(cs, struct ti_32k, cs);
}
static u64 notrace ti_32k_read_cycles(struct clocksource *cs)
{
struct ti_32k *ti = to_ti_32k(cs);
return (u64)readl_relaxed(ti->counter);
}
static struct ti_32k ti_32k_timer = {
.cs = {
.name = "32k_counter",
.rating = 250,
.read = ti_32k_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
static u64 notrace omap_32k_read_sched_clock(void)
{
return ti_32k_read_cycles(&ti_32k_timer.cs);
}
static void __init ti_32k_timer_enable_clock(struct device_node *np,
const char *name)
{
struct clk *clock;
int error;
clock = of_clk_get_by_name(np->parent, name);
if (IS_ERR(clock)) {
/* Only some SoCs have a separate interface clock */
if (PTR_ERR(clock) == -EINVAL && !strncmp("ick", name, 3))
return;
pr_warn("%s: could not get clock %s %li\n",
__func__, name, PTR_ERR(clock));
return;
}
error = clk_prepare_enable(clock);
if (error) {
pr_warn("%s: could not enable %s: %i\n",
__func__, name, error);
return;
}
}
static void __init ti_32k_timer_module_init(struct device_node *np,
void __iomem *base)
{
void __iomem *sysc = base + 4;
if (!of_device_is_compatible(np->parent, "ti,sysc"))
return;
ti_32k_timer_enable_clock(np, "fck");
ti_32k_timer_enable_clock(np, "ick");
/*
* Force idle module as wkup domain is active with MPU.
* No need to tag the module disabled for ti-sysc probe.
*/
writel_relaxed(0, sysc);
}
static int __init ti_32k_timer_init(struct device_node *np)
{
int ret;
ti_32k_timer.base = of_iomap(np, 0);
if (!ti_32k_timer.base) {
pr_err("Can't ioremap 32k timer base\n");
return -ENXIO;
}
if (!of_machine_is_compatible("ti,am43"))
ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
ti_32k_timer.counter = ti_32k_timer.base;
ti_32k_timer_module_init(np, ti_32k_timer.base);
/*
* 32k sync Counter IP register offsets vary between the highlander
* version and the legacy ones.
*
 * The 'SCHEME' bits (30-31) of the revision register are used to identify
 * the version.
*/
if (readl_relaxed(ti_32k_timer.base + OMAP2_32KSYNCNT_REV_OFF) &
OMAP2_32KSYNCNT_REV_SCHEME)
ti_32k_timer.counter += OMAP2_32KSYNCNT_CR_OFF_HIGH;
else
ti_32k_timer.counter += OMAP2_32KSYNCNT_CR_OFF_LOW;
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
return ret;
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
return 0;
}
TIMER_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
ti_32k_timer_init);
|
linux-master
|
drivers/clocksource/timer-ti-32k.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*
 * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO timer
 * device.
*/
#define pr_fmt(fmt) "clint: " fmt
#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
#include <linux/timex.h>
#ifndef CONFIG_RISCV_M_MODE
#include <asm/clint.h>
#endif
#define CLINT_IPI_OFF 0
#define CLINT_TIMER_CMP_OFF 0x4000
#define CLINT_TIMER_VAL_OFF 0xbff8
/* CLINT manages IPI and Timer for RISC-V M-mode */
static u32 __iomem *clint_ipi_base;
static unsigned int clint_ipi_irq;
static u64 __iomem *clint_timer_cmp;
static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;
#ifdef CONFIG_RISCV_M_MODE
u64 __iomem *clint_time_val;
EXPORT_SYMBOL(clint_time_val);
#endif
#ifdef CONFIG_SMP
static void clint_send_ipi(unsigned int cpu)
{
writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
}
static void clint_clear_ipi(void)
{
writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}
static void clint_ipi_interrupt(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
clint_clear_ipi();
ipi_mux_process();
chained_irq_exit(chip, desc);
}
#endif
#ifdef CONFIG_64BIT
#define clint_get_cycles() readq_relaxed(clint_timer_val)
#else
#define clint_get_cycles() readl_relaxed(clint_timer_val)
#define clint_get_cycles_hi() readl_relaxed(((u32 *)clint_timer_val) + 1)
#endif
#ifdef CONFIG_64BIT
static u64 notrace clint_get_cycles64(void)
{
return clint_get_cycles();
}
#else /* CONFIG_64BIT */
static u64 notrace clint_get_cycles64(void)
{
u32 hi, lo;
do {
hi = clint_get_cycles_hi();
lo = clint_get_cycles();
} while (hi != clint_get_cycles_hi());
return ((u64)hi << 32) | lo;
}
#endif /* CONFIG_64BIT */
static u64 clint_rdtime(struct clocksource *cs)
{
return clint_get_cycles64();
}
static struct clocksource clint_clocksource = {
.name = "clint_clocksource",
.rating = 300,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = clint_rdtime,
};
static int clint_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
void __iomem *r = clint_timer_cmp +
cpuid_to_hartid_map(smp_processor_id());
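/*
 * Re-enable the timer interrupt (the handler clears it) and arm the
 * per-hart compare register with an absolute deadline.
 */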
csr_set(CSR_IE, IE_TIE);
writeq_relaxed(clint_get_cycles64() + delta, r);
return 0;
}
static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
.name = "clint_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.set_next_event = clint_clock_next_event,
};
static int clint_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
enable_percpu_irq(clint_timer_irq,
irq_get_trigger_type(clint_timer_irq));
enable_percpu_irq(clint_ipi_irq,
irq_get_trigger_type(clint_ipi_irq));
return 0;
}
static int clint_timer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(clint_timer_irq);
/*
* Don't disable the IPI when the CPU goes offline, because the
* masking/unmasking of virtual IPIs is done via the generic IPI-Mux.
*/
return 0;
}
static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);
csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
return IRQ_HANDLED;
}
static int __init clint_timer_init_dt(struct device_node *np)
{
int rc;
u32 i, nr_irqs;
void __iomem *base;
struct of_phandle_args oirq;
/*
* Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
* RV_IRQ_SOFT. If it's anything else then we ignore the device.
*/
nr_irqs = of_irq_count(np);
for (i = 0; i < nr_irqs; i++) {
if (of_irq_parse_one(np, i, &oirq)) {
pr_err("%pOFP: failed to parse irq %d.\n", np, i);
continue;
}
if ((oirq.args_count != 1) ||
(oirq.args[0] != RV_IRQ_TIMER &&
oirq.args[0] != RV_IRQ_SOFT)) {
pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
np, i, oirq.args[0]);
return -ENODEV;
}
/* Find parent irq domain and map ipi irq */
if (!clint_ipi_irq &&
oirq.args[0] == RV_IRQ_SOFT &&
irq_find_host(oirq.np))
clint_ipi_irq = irq_of_parse_and_map(np, i);
/* Find parent irq domain and map timer irq */
if (!clint_timer_irq &&
oirq.args[0] == RV_IRQ_TIMER &&
irq_find_host(oirq.np))
clint_timer_irq = irq_of_parse_and_map(np, i);
}
/* If CLINT ipi or timer irq not found then fail */
if (!clint_ipi_irq || !clint_timer_irq) {
pr_err("%pOFP: ipi/timer irq not found\n", np);
return -ENODEV;
}
base = of_iomap(np, 0);
if (!base) {
pr_err("%pOFP: could not map registers\n", np);
return -ENODEV;
}
clint_ipi_base = base + CLINT_IPI_OFF;
clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
clint_timer_val = base + CLINT_TIMER_VAL_OFF;
clint_timer_freq = riscv_timebase;
#ifdef CONFIG_RISCV_M_MODE
/*
* Yes, that's an odd naming scheme. time_val is public, but hopefully
* will die in favor of something cleaner.
*/
clint_time_val = clint_timer_val;
#endif
pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);
rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
if (rc) {
pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
goto fail_iounmap;
}
sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);
rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
"clint-timer", &clint_clock_event);
if (rc) {
pr_err("registering percpu irq failed [%d]\n", rc);
goto fail_iounmap;
}
#ifdef CONFIG_SMP
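/*
 * The CLINT provides a single software interrupt per hart; layer up to
 * BITS_PER_BYTE virtual IPIs on top of it via the generic IPI mux.
 */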
rc = ipi_mux_create(BITS_PER_BYTE, clint_send_ipi);
if (rc <= 0) {
pr_err("unable to create muxed IPIs\n");
rc = (rc < 0) ? rc : -ENODEV;
goto fail_free_irq;
}
irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
clint_clear_ipi();
#endif
rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
"clockevents/clint/timer:starting",
clint_timer_starting_cpu,
clint_timer_dying_cpu);
if (rc) {
pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
goto fail_free_irq;
}
return 0;
fail_free_irq:
free_percpu_irq(clint_timer_irq, &clint_clock_event);
fail_iounmap:
iounmap(base);
return rc;
}
TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);
|
linux-master
|
drivers/clocksource/timer-clint.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Amlogic Meson6 SoCs timer handling.
*
* Copyright (C) 2014 Carlo Caione <[email protected]>
*
* Based on code from Amlogic, Inc
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#ifdef CONFIG_ARM
#include <linux/delay.h>
#endif
#define MESON_ISA_TIMER_MUX 0x00
#define MESON_ISA_TIMER_MUX_TIMERD_EN BIT(19)
#define MESON_ISA_TIMER_MUX_TIMERC_EN BIT(18)
#define MESON_ISA_TIMER_MUX_TIMERB_EN BIT(17)
#define MESON_ISA_TIMER_MUX_TIMERA_EN BIT(16)
#define MESON_ISA_TIMER_MUX_TIMERD_MODE BIT(15)
#define MESON_ISA_TIMER_MUX_TIMERC_MODE BIT(14)
#define MESON_ISA_TIMER_MUX_TIMERB_MODE BIT(13)
#define MESON_ISA_TIMER_MUX_TIMERA_MODE BIT(12)
#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_MASK GENMASK(10, 8)
#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_SYSTEM_CLOCK 0x0
#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_1US 0x1
#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_10US 0x2
#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_100US 0x3
#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_1MS 0x4
#define MESON_ISA_TIMER_MUX_TIMERD_INPUT_CLOCK_MASK GENMASK(7, 6)
#define MESON_ISA_TIMER_MUX_TIMERC_INPUT_CLOCK_MASK GENMASK(5, 4)
#define MESON_ISA_TIMER_MUX_TIMERB_INPUT_CLOCK_MASK GENMASK(3, 2)
#define MESON_ISA_TIMER_MUX_TIMERA_INPUT_CLOCK_MASK GENMASK(1, 0)
#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_1US 0x0
#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_10US 0x1
#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_100US 0x0
#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_1MS 0x3
#define MESON_ISA_TIMERA 0x04
#define MESON_ISA_TIMERB 0x08
#define MESON_ISA_TIMERC 0x0c
#define MESON_ISA_TIMERD 0x10
#define MESON_ISA_TIMERE 0x14
static void __iomem *timer_base;
#ifdef CONFIG_ARM
static unsigned long meson6_read_current_timer(void)
{
return readl_relaxed(timer_base + MESON_ISA_TIMERE);
}
static struct delay_timer meson6_delay_timer = {
.read_current_timer = meson6_read_current_timer,
.freq = 1000 * 1000,
};
#endif
static u64 notrace meson6_timer_sched_read(void)
{
return (u64)readl(timer_base + MESON_ISA_TIMERE);
}
static void meson6_clkevt_time_stop(void)
{
u32 val = readl(timer_base + MESON_ISA_TIMER_MUX);
writel(val & ~MESON_ISA_TIMER_MUX_TIMERA_EN,
timer_base + MESON_ISA_TIMER_MUX);
}
static void meson6_clkevt_time_setup(unsigned long delay)
{
writel(delay, timer_base + MESON_ISA_TIMERA);
}
static void meson6_clkevt_time_start(bool periodic)
{
u32 val = readl(timer_base + MESON_ISA_TIMER_MUX);
if (periodic)
val |= MESON_ISA_TIMER_MUX_TIMERA_MODE;
else
val &= ~MESON_ISA_TIMER_MUX_TIMERA_MODE;
writel(val | MESON_ISA_TIMER_MUX_TIMERA_EN,
timer_base + MESON_ISA_TIMER_MUX);
}
static int meson6_shutdown(struct clock_event_device *evt)
{
meson6_clkevt_time_stop();
return 0;
}
static int meson6_set_oneshot(struct clock_event_device *evt)
{
meson6_clkevt_time_stop();
meson6_clkevt_time_start(false);
return 0;
}
static int meson6_set_periodic(struct clock_event_device *evt)
{
meson6_clkevt_time_stop();
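/* Timer A ticks every 1us, so one jiffy is USEC_PER_SEC / HZ ticks. */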
meson6_clkevt_time_setup(USEC_PER_SEC / HZ - 1);
meson6_clkevt_time_start(true);
return 0;
}
static int meson6_clkevt_next_event(unsigned long evt,
struct clock_event_device *unused)
{
meson6_clkevt_time_stop();
meson6_clkevt_time_setup(evt);
meson6_clkevt_time_start(false);
return 0;
}
static struct clock_event_device meson6_clockevent = {
.name = "meson6_tick",
.rating = 400,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = meson6_shutdown,
.set_state_periodic = meson6_set_periodic,
.set_state_oneshot = meson6_set_oneshot,
.tick_resume = meson6_shutdown,
.set_next_event = meson6_clkevt_next_event,
};
static irqreturn_t meson6_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init meson6_timer_init(struct device_node *node)
{
u32 val;
int ret, irq;
timer_base = of_io_request_and_map(node, 0, "meson6-timer");
if (IS_ERR(timer_base)) {
pr_err("Can't map registers\n");
return -ENXIO;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
pr_err("Can't parse IRQ\n");
return -EINVAL;
}
/* Set 1us for timer E */
val = readl(timer_base + MESON_ISA_TIMER_MUX);
val &= ~MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_MASK;
val |= FIELD_PREP(MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_MASK,
MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_1US);
writel(val, timer_base + MESON_ISA_TIMER_MUX);
sched_clock_register(meson6_timer_sched_read, 32, USEC_PER_SEC);
clocksource_mmio_init(timer_base + MESON_ISA_TIMERE, node->name,
1000 * 1000, 300, 32, clocksource_mmio_readl_up);
/* Timer A base 1us */
val &= ~MESON_ISA_TIMER_MUX_TIMERA_INPUT_CLOCK_MASK;
val |= FIELD_PREP(MESON_ISA_TIMER_MUX_TIMERA_INPUT_CLOCK_MASK,
MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_1US);
writel(val, timer_base + MESON_ISA_TIMER_MUX);
/* Stop the timer A */
meson6_clkevt_time_stop();
ret = request_irq(irq, meson6_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "meson6_timer",
&meson6_clockevent);
if (ret) {
pr_warn("failed to setup irq %d\n", irq);
return ret;
}
meson6_clockevent.cpumask = cpu_possible_mask;
meson6_clockevent.irq = irq;
clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
1, 0xfffe);
#ifdef CONFIG_ARM
/* Also use MESON_ISA_TIMERE for delays */
register_current_timer_delay(&meson6_delay_timer);
#endif
return 0;
}
TIMER_OF_DECLARE(meson6, "amlogic,meson6-timer",
meson6_timer_init);
|
linux-master
|
drivers/clocksource/timer-meson6.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - TMU
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif
enum sh_tmu_model {
SH_TMU,
SH_TMU_SH3,
};
struct sh_tmu_device;
struct sh_tmu_channel {
struct sh_tmu_device *tmu;
unsigned int index;
void __iomem *base;
int irq;
unsigned long periodic;
struct clock_event_device ced;
struct clocksource cs;
bool cs_enabled;
unsigned int enable_count;
};
struct sh_tmu_device {
struct platform_device *pdev;
void __iomem *mapbase;
struct clk *clk;
unsigned long rate;
enum sh_tmu_model model;
raw_spinlock_t lock; /* Protect the shared start/stop register */
struct sh_tmu_channel *channels;
unsigned int num_channels;
bool has_clockevent;
bool has_clocksource;
};
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */
#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
unsigned long offs;
if (reg_nr == TSTR) {
switch (ch->tmu->model) {
case SH_TMU_SH3:
return ioread8(ch->tmu->mapbase + 2);
case SH_TMU:
return ioread8(ch->tmu->mapbase + 4);
}
}
offs = reg_nr << 2;
if (reg_nr == TCR)
return ioread16(ch->base + offs);
else
return ioread32(ch->base + offs);
}
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
unsigned long value)
{
unsigned long offs;
if (reg_nr == TSTR) {
switch (ch->tmu->model) {
case SH_TMU_SH3:
return iowrite8(value, ch->tmu->mapbase + 2);
case SH_TMU:
return iowrite8(value, ch->tmu->mapbase + 4);
}
}
offs = reg_nr << 2;
if (reg_nr == TCR)
iowrite16(value, ch->base + offs);
else
iowrite32(value, ch->base + offs);
}
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->tmu->lock, flags);
value = sh_tmu_read(ch, TSTR);
if (start)
value |= 1 << ch->index;
else
value &= ~(1 << ch->index);
sh_tmu_write(ch, TSTR, value);
raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
int ret;
/* enable clock */
ret = clk_enable(ch->tmu->clk);
if (ret) {
dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
ch->index);
return ret;
}
/* make sure channel is disabled */
sh_tmu_start_stop_ch(ch, 0);
/* maximum timeout */
sh_tmu_write(ch, TCOR, 0xffffffff);
sh_tmu_write(ch, TCNT, 0xffffffff);
/* configure channel to parent clock / 4, irq off */
sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
/* enable channel */
sh_tmu_start_stop_ch(ch, 1);
return 0;
}
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
if (ch->enable_count++ > 0)
return 0;
pm_runtime_get_sync(&ch->tmu->pdev->dev);
dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
return __sh_tmu_enable(ch);
}
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
/* disable channel */
sh_tmu_start_stop_ch(ch, 0);
/* disable interrupts in TMU block */
sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
/* stop clock */
clk_disable(ch->tmu->clk);
}
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
if (WARN_ON(ch->enable_count == 0))
return;
if (--ch->enable_count > 0)
return;
__sh_tmu_disable(ch);
dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
pm_runtime_put(&ch->tmu->pdev->dev);
}
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
int periodic)
{
/* stop timer */
sh_tmu_start_stop_ch(ch, 0);
/* acknowledge interrupt */
sh_tmu_read(ch, TCR);
/* enable interrupt */
sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
/* reload delta value in case of periodic timer */
if (periodic)
sh_tmu_write(ch, TCOR, delta);
else
sh_tmu_write(ch, TCOR, 0xffffffff);
sh_tmu_write(ch, TCNT, delta);
/* start timer */
sh_tmu_start_stop_ch(ch, 1);
}
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
struct sh_tmu_channel *ch = dev_id;
/* disable or acknowledge interrupt */
if (clockevent_state_oneshot(&ch->ced))
sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
else
sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
/* notify clockevent layer */
ch->ced.event_handler(&ch->ced);
return IRQ_HANDLED;
}
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
return container_of(cs, struct sh_tmu_channel, cs);
}
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
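/* TCNT counts down; invert it so the clocksource appears to count up. */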
return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
int ret;
if (WARN_ON(ch->cs_enabled))
return 0;
ret = sh_tmu_enable(ch);
if (!ret)
ch->cs_enabled = true;
return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
if (WARN_ON(!ch->cs_enabled))
return;
sh_tmu_disable(ch);
ch->cs_enabled = false;
}
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
if (!ch->cs_enabled)
return;
if (--ch->enable_count == 0) {
__sh_tmu_disable(ch);
dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
}
}
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
if (!ch->cs_enabled)
return;
if (ch->enable_count++ == 0) {
dev_pm_genpd_resume(&ch->tmu->pdev->dev);
__sh_tmu_enable(ch);
}
}
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
const char *name)
{
struct clocksource *cs = &ch->cs;
cs->name = name;
cs->rating = 200;
cs->read = sh_tmu_clocksource_read;
cs->enable = sh_tmu_clocksource_enable;
cs->disable = sh_tmu_clocksource_disable;
cs->suspend = sh_tmu_clocksource_suspend;
cs->resume = sh_tmu_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
ch->index);
clocksource_register_hz(cs, ch->tmu->rate);
return 0;
}
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
return container_of(ced, struct sh_tmu_channel, ced);
}
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
sh_tmu_enable(ch);
if (periodic) {
ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
sh_tmu_set_next(ch, ch->periodic, 1);
}
}
static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
sh_tmu_disable(ch);
return 0;
}
static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
int periodic)
{
struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
/* deal with old setting first */
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
sh_tmu_disable(ch);
dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
ch->index, periodic ? "periodic" : "oneshot");
sh_tmu_clock_event_start(ch, periodic);
return 0;
}
static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
return sh_tmu_clock_event_set_state(ced, 0);
}
static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
return sh_tmu_clock_event_set_state(ced, 1);
}
static int sh_tmu_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
BUG_ON(!clockevent_state_oneshot(ced));
/* program new delta value */
sh_tmu_set_next(ch, delta, 0);
return 0;
}
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
const char *name)
{
struct clock_event_device *ced = &ch->ced;
int ret;
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
ced->suspend = sh_tmu_clock_event_suspend;
ced->resume = sh_tmu_clock_event_resume;
dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
ch->index);
clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);
ret = request_irq(ch->irq, sh_tmu_interrupt,
IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
dev_name(&ch->tmu->pdev->dev), ch);
if (ret) {
dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
ch->index, ch->irq);
return;
}
}
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
bool clockevent, bool clocksource)
{
if (clockevent) {
ch->tmu->has_clockevent = true;
sh_tmu_register_clockevent(ch, name);
} else if (clocksource) {
ch->tmu->has_clocksource = true;
sh_tmu_register_clocksource(ch, name);
}
return 0;
}
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
bool clockevent, bool clocksource,
struct sh_tmu_device *tmu)
{
/* Skip unused channels. */
if (!clockevent && !clocksource)
return 0;
ch->tmu = tmu;
ch->index = index;
if (tmu->model == SH_TMU_SH3)
ch->base = tmu->mapbase + 4 + ch->index * 12;
else
ch->base = tmu->mapbase + 8 + ch->index * 12;
ch->irq = platform_get_irq(tmu->pdev, index);
if (ch->irq < 0)
return ch->irq;
ch->cs_enabled = false;
ch->enable_count = 0;
return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
clockevent, clocksource);
}
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
struct resource *res;
res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
}
tmu->mapbase = ioremap(res->start, resource_size(res));
if (tmu->mapbase == NULL)
return -ENXIO;
return 0;
}
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
struct device_node *np = tmu->pdev->dev.of_node;
tmu->model = SH_TMU;
tmu->num_channels = 3;
of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);
if (tmu->num_channels != 2 && tmu->num_channels != 3) {
dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
tmu->num_channels);
return -EINVAL;
}
return 0;
}
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
unsigned int i;
int ret;
tmu->pdev = pdev;
raw_spin_lock_init(&tmu->lock);
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
ret = sh_tmu_parse_dt(tmu);
if (ret < 0)
return ret;
} else if (pdev->dev.platform_data) {
const struct platform_device_id *id = pdev->id_entry;
struct sh_timer_config *cfg = pdev->dev.platform_data;
tmu->model = id->driver_data;
tmu->num_channels = hweight8(cfg->channels_mask);
} else {
dev_err(&tmu->pdev->dev, "missing platform data\n");
return -ENXIO;
}
/* Get hold of clock. */
tmu->clk = clk_get(&tmu->pdev->dev, "fck");
if (IS_ERR(tmu->clk)) {
dev_err(&tmu->pdev->dev, "cannot get clock\n");
return PTR_ERR(tmu->clk);
}
ret = clk_prepare(tmu->clk);
if (ret < 0)
goto err_clk_put;
/* Determine clock rate. */
ret = clk_enable(tmu->clk);
if (ret < 0)
goto err_clk_unprepare;
tmu->rate = clk_get_rate(tmu->clk) / 4;
clk_disable(tmu->clk);
/* Map the memory resource. */
ret = sh_tmu_map_memory(tmu);
if (ret < 0) {
dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
goto err_clk_unprepare;
}
/* Allocate and setup the channels. */
tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
GFP_KERNEL);
if (tmu->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;
}
/*
* Use the first channel as a clock event device and the second channel
* as a clock source.
*/
for (i = 0; i < tmu->num_channels; ++i) {
ret = sh_tmu_channel_setup(&tmu->channels[i], i,
i == 0, i == 1, tmu);
if (ret < 0)
goto err_unmap;
}
platform_set_drvdata(pdev, tmu);
return 0;
err_unmap:
kfree(tmu->channels);
iounmap(tmu->mapbase);
err_clk_unprepare:
clk_unprepare(tmu->clk);
err_clk_put:
clk_put(tmu->clk);
return ret;
}
static int sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
int ret;
if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
if (tmu) {
dev_info(&pdev->dev, "kept as earlytimer\n");
goto out;
}
tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
if (tmu == NULL)
return -ENOMEM;
ret = sh_tmu_setup(tmu, pdev);
if (ret) {
kfree(tmu);
pm_runtime_idle(&pdev->dev);
return ret;
}
if (is_sh_early_platform_device(pdev))
return 0;
out:
if (tmu->has_clockevent || tmu->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
else
pm_runtime_idle(&pdev->dev);
return 0;
}
static const struct platform_device_id sh_tmu_id_table[] = {
{ "sh-tmu", SH_TMU },
{ "sh-tmu-sh3", SH_TMU_SH3 },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
{ .compatible = "renesas,tmu" },
{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
.driver = {
.name = "sh_tmu",
.of_match_table = of_match_ptr(sh_tmu_of_table),
.suppress_bind_attrs = true,
},
.id_table = sh_tmu_id_table,
};
static int __init sh_tmu_init(void)
{
return platform_driver_register(&sh_tmu_device_driver);
}
static void __exit sh_tmu_exit(void)
{
platform_driver_unregister(&sh_tmu_device_driver);
}
#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
|
linux-master
|
drivers/clocksource/sh_tmu.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* JZ47xx SoCs TCU Operating System Timer driver
*
* Copyright (C) 2016 Maarten ter Huurne <[email protected]>
* Copyright (C) 2020 Paul Cercueil <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>
#define TCU_OST_TCSR_MASK 0xffc0
#define TCU_OST_TCSR_CNT_MD BIT(15)
#define TCU_OST_CHANNEL 15
/*
* The TCU_REG_OST_CNT{L,H} from <linux/mfd/ingenic-tcu.h> are only for the
* regmap; these are for use with the __iomem pointer.
*/
#define OST_REG_CNTL 0x4
#define OST_REG_CNTH 0x8
struct ingenic_ost_soc_info {
bool is64bit;
};
struct ingenic_ost {
void __iomem *regs;
struct clk *clk;
struct clocksource cs;
};
static struct ingenic_ost *ingenic_ost;
static u64 notrace ingenic_ost_read_cntl(void)
{
/* Read using __iomem pointer instead of regmap to avoid locking */
return readl(ingenic_ost->regs + OST_REG_CNTL);
}
static u64 notrace ingenic_ost_read_cnth(void)
{
/* Read using __iomem pointer instead of regmap to avoid locking */
return readl(ingenic_ost->regs + OST_REG_CNTH);
}
static u64 notrace ingenic_ost_clocksource_readl(struct clocksource *cs)
{
return ingenic_ost_read_cntl();
}
static u64 notrace ingenic_ost_clocksource_readh(struct clocksource *cs)
{
return ingenic_ost_read_cnth();
}
static int __init ingenic_ost_probe(struct platform_device *pdev)
{
const struct ingenic_ost_soc_info *soc_info;
struct device *dev = &pdev->dev;
struct ingenic_ost *ost;
struct clocksource *cs;
struct regmap *map;
unsigned long rate;
int err;
soc_info = device_get_match_data(dev);
if (!soc_info)
return -EINVAL;
ost = devm_kzalloc(dev, sizeof(*ost), GFP_KERNEL);
if (!ost)
return -ENOMEM;
ingenic_ost = ost;
ost->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ost->regs))
return PTR_ERR(ost->regs);
map = device_node_to_regmap(dev->parent->of_node);
if (IS_ERR(map)) {
dev_err(dev, "regmap not found");
return PTR_ERR(map);
}
ost->clk = devm_clk_get(dev, "ost");
if (IS_ERR(ost->clk))
return PTR_ERR(ost->clk);
err = clk_prepare_enable(ost->clk);
if (err)
return err;
/* Clear counter high/low registers */
if (soc_info->is64bit)
regmap_write(map, TCU_REG_OST_CNTL, 0);
regmap_write(map, TCU_REG_OST_CNTH, 0);
/* Don't reset counter at compare value. */
regmap_update_bits(map, TCU_REG_OST_TCSR,
TCU_OST_TCSR_MASK, TCU_OST_TCSR_CNT_MD);
rate = clk_get_rate(ost->clk);
/* Enable OST TCU channel */
regmap_write(map, TCU_REG_TESR, BIT(TCU_OST_CHANNEL));
cs = &ost->cs;
cs->name = "ingenic-ost";
cs->rating = 320;
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
cs->mask = CLOCKSOURCE_MASK(32);
if (soc_info->is64bit)
cs->read = ingenic_ost_clocksource_readl;
else
cs->read = ingenic_ost_clocksource_readh;
err = clocksource_register_hz(cs, rate);
if (err) {
dev_err(dev, "clocksource registration failed");
clk_disable_unprepare(ost->clk);
return err;
}
if (soc_info->is64bit)
sched_clock_register(ingenic_ost_read_cntl, 32, rate);
else
sched_clock_register(ingenic_ost_read_cnth, 32, rate);
return 0;
}
static int ingenic_ost_suspend(struct device *dev)
{
struct ingenic_ost *ost = dev_get_drvdata(dev);
clk_disable(ost->clk);
return 0;
}
static int ingenic_ost_resume(struct device *dev)
{
struct ingenic_ost *ost = dev_get_drvdata(dev);
return clk_enable(ost->clk);
}
static const struct dev_pm_ops ingenic_ost_pm_ops = {
/* _noirq: We want the OST clock to be gated last / ungated first */
.suspend_noirq = ingenic_ost_suspend,
.resume_noirq = ingenic_ost_resume,
};
static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = {
.is64bit = false,
};
static const struct ingenic_ost_soc_info jz4760b_ost_soc_info = {
.is64bit = true,
};
static const struct of_device_id ingenic_ost_of_match[] = {
{ .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, },
{ .compatible = "ingenic,jz4760b-ost", .data = &jz4760b_ost_soc_info, },
{ .compatible = "ingenic,jz4770-ost", .data = &jz4760b_ost_soc_info, },
{ }
};
static struct platform_driver ingenic_ost_driver = {
.driver = {
.name = "ingenic-ost",
.pm = pm_sleep_ptr(&ingenic_ost_pm_ops),
.of_match_table = ingenic_ost_of_match,
},
};
builtin_platform_driver_probe(ingenic_ost_driver, ingenic_ost_probe);
|
linux-master
|
drivers/clocksource/ingenic-ost.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved.
*/
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
/* shared registers */
#define TKETSC0 0x000
#define TKETSC1 0x004
#define TKEUSEC 0x008
#define TKEOSC 0x00c
#define TKEIE(x) (0x100 + ((x) * 4))
#define TKEIE_WDT_MASK(x, y) ((y) << (16 + 4 * (x)))
/* timer registers */
#define TMRCR 0x000
#define TMRCR_ENABLE BIT(31)
#define TMRCR_PERIODIC BIT(30)
#define TMRCR_PTV(x) ((x) & 0x0fffffff)
#define TMRSR 0x004
#define TMRSR_INTR_CLR BIT(30)
#define TMRCSSR 0x008
#define TMRCSSR_SRC_USEC (0 << 0)
/* watchdog registers */
#define WDTCR 0x000
#define WDTCR_SYSTEM_POR_RESET_ENABLE BIT(16)
#define WDTCR_SYSTEM_DEBUG_RESET_ENABLE BIT(15)
#define WDTCR_REMOTE_INT_ENABLE BIT(14)
#define WDTCR_LOCAL_FIQ_ENABLE BIT(13)
#define WDTCR_LOCAL_INT_ENABLE BIT(12)
#define WDTCR_PERIOD_MASK (0xff << 4)
#define WDTCR_PERIOD(x) (((x) & 0xff) << 4)
#define WDTCR_TIMER_SOURCE_MASK 0xf
#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf)
#define WDTCMDR 0x008
#define WDTCMDR_DISABLE_COUNTER BIT(1)
#define WDTCMDR_START_COUNTER BIT(0)
#define WDTUR 0x00c
#define WDTUR_UNLOCK_PATTERN 0x0000c45a
struct tegra186_timer_soc {
unsigned int num_timers;
unsigned int num_wdts;
};
struct tegra186_tmr {
struct tegra186_timer *parent;
void __iomem *regs;
unsigned int index;
unsigned int hwirq;
};
struct tegra186_wdt {
struct watchdog_device base;
void __iomem *regs;
unsigned int index;
bool locked;
struct tegra186_tmr *tmr;
};
static inline struct tegra186_wdt *to_tegra186_wdt(struct watchdog_device *wdd)
{
return container_of(wdd, struct tegra186_wdt, base);
}
struct tegra186_timer {
const struct tegra186_timer_soc *soc;
struct device *dev;
void __iomem *regs;
struct tegra186_wdt *wdt;
struct clocksource usec;
struct clocksource tsc;
struct clocksource osc;
};
static void tmr_writel(struct tegra186_tmr *tmr, u32 value, unsigned int offset)
{
writel_relaxed(value, tmr->regs + offset);
}
static void wdt_writel(struct tegra186_wdt *wdt, u32 value, unsigned int offset)
{
writel_relaxed(value, wdt->regs + offset);
}
static u32 wdt_readl(struct tegra186_wdt *wdt, unsigned int offset)
{
return readl_relaxed(wdt->regs + offset);
}
static struct tegra186_tmr *tegra186_tmr_create(struct tegra186_timer *tegra,
unsigned int index)
{
unsigned int offset = 0x10000 + index * 0x10000;
struct tegra186_tmr *tmr;
tmr = devm_kzalloc(tegra->dev, sizeof(*tmr), GFP_KERNEL);
if (!tmr)
return ERR_PTR(-ENOMEM);
tmr->parent = tegra;
tmr->regs = tegra->regs + offset;
tmr->index = index;
tmr->hwirq = 0;
return tmr;
}
static const struct watchdog_info tegra186_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "NVIDIA Tegra186 WDT",
};
static void tegra186_wdt_disable(struct tegra186_wdt *wdt)
{
/* unlock and disable the watchdog */
wdt_writel(wdt, WDTUR_UNLOCK_PATTERN, WDTUR);
wdt_writel(wdt, WDTCMDR_DISABLE_COUNTER, WDTCMDR);
/* disable timer */
tmr_writel(wdt->tmr, 0, TMRCR);
}
static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
{
struct tegra186_timer *tegra = wdt->tmr->parent;
u32 value;
/* unmask hardware IRQ, this may have been lost across powergate */
value = TKEIE_WDT_MASK(wdt->index, 1);
writel(value, tegra->regs + TKEIE(wdt->tmr->hwirq));
/* clear interrupt */
tmr_writel(wdt->tmr, TMRSR_INTR_CLR, TMRSR);
/* select microsecond source */
tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
/* configure timer (system reset happens on the fifth expiration) */
value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
TMRCR_PERIODIC | TMRCR_ENABLE;
tmr_writel(wdt->tmr, value, TMRCR);
if (!wdt->locked) {
value = wdt_readl(wdt, WDTCR);
/* select the proper timer source */
value &= ~WDTCR_TIMER_SOURCE_MASK;
value |= WDTCR_TIMER_SOURCE(wdt->tmr->index);
/* single timer period since that's already configured */
value &= ~WDTCR_PERIOD_MASK;
value |= WDTCR_PERIOD(1);
/* enable local interrupt for WDT petting */
value |= WDTCR_LOCAL_INT_ENABLE;
/* enable local FIQ and remote interrupt for debug dump */
if (0)
value |= WDTCR_REMOTE_INT_ENABLE |
WDTCR_LOCAL_FIQ_ENABLE;
/* enable system debug reset (doesn't properly reboot) */
if (0)
value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE;
/* enable system POR reset */
value |= WDTCR_SYSTEM_POR_RESET_ENABLE;
wdt_writel(wdt, value, WDTCR);
}
wdt_writel(wdt, WDTCMDR_START_COUNTER, WDTCMDR);
}
static int tegra186_wdt_start(struct watchdog_device *wdd)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
tegra186_wdt_enable(wdt);
return 0;
}
static int tegra186_wdt_stop(struct watchdog_device *wdd)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
tegra186_wdt_disable(wdt);
return 0;
}
static int tegra186_wdt_ping(struct watchdog_device *wdd)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
tegra186_wdt_disable(wdt);
tegra186_wdt_enable(wdt);
return 0;
}
static int tegra186_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
if (watchdog_active(&wdt->base))
tegra186_wdt_disable(wdt);
wdt->base.timeout = timeout;
if (watchdog_active(&wdt->base))
tegra186_wdt_enable(wdt);
return 0;
}
static const struct watchdog_ops tegra186_wdt_ops = {
.owner = THIS_MODULE,
.start = tegra186_wdt_start,
.stop = tegra186_wdt_stop,
.ping = tegra186_wdt_ping,
.set_timeout = tegra186_wdt_set_timeout,
};
static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
unsigned int index)
{
unsigned int offset = 0x10000, source;
struct tegra186_wdt *wdt;
u32 value;
int err;
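/* Watchdog register blocks follow the per-timer blocks, each 0x10000 apart. */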
offset += tegra->soc->num_timers * 0x10000 + index * 0x10000;
wdt = devm_kzalloc(tegra->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return ERR_PTR(-ENOMEM);
wdt->regs = tegra->regs + offset;
wdt->index = index;
/* read the watchdog configuration since it might be locked down */
value = wdt_readl(wdt, WDTCR);
if (value & WDTCR_LOCAL_INT_ENABLE)
wdt->locked = true;
source = value & WDTCR_TIMER_SOURCE_MASK;
wdt->tmr = tegra186_tmr_create(tegra, source);
if (IS_ERR(wdt->tmr))
return ERR_CAST(wdt->tmr);
wdt->base.info = &tegra186_wdt_info;
wdt->base.ops = &tegra186_wdt_ops;
wdt->base.min_timeout = 1;
wdt->base.max_timeout = 255;
wdt->base.parent = tegra->dev;
err = watchdog_init_timeout(&wdt->base, 5, tegra->dev);
if (err < 0) {
dev_err(tegra->dev, "failed to initialize timeout: %d\n", err);
return ERR_PTR(err);
}
err = devm_watchdog_register_device(tegra->dev, &wdt->base);
if (err < 0) {
dev_err(tegra->dev, "failed to register WDT: %d\n", err);
return ERR_PTR(err);
}
return wdt;
}
static u64 tegra186_timer_tsc_read(struct clocksource *cs)
{
struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer,
tsc);
u32 hi, lo, ss;
hi = readl_relaxed(tegra->regs + TKETSC1);
/*
* The 56-bit value of the TSC is spread across two registers that are
* not synchronized. In order to read them atomically, ensure that the
* high 24 bits match before and after reading the low 32 bits.
*/
do {
/* snapshot the high 24 bits */
ss = hi;
lo = readl_relaxed(tegra->regs + TKETSC0);
hi = readl_relaxed(tegra->regs + TKETSC1);
} while (hi != ss);
return (u64)hi << 32 | lo;
}
static int tegra186_timer_tsc_init(struct tegra186_timer *tegra)
{
tegra->tsc.name = "tsc";
tegra->tsc.rating = 300;
tegra->tsc.read = tegra186_timer_tsc_read;
tegra->tsc.mask = CLOCKSOURCE_MASK(56);
tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
return clocksource_register_hz(&tegra->tsc, 31250000);
}
static u64 tegra186_timer_osc_read(struct clocksource *cs)
{
struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer,
osc);
return readl_relaxed(tegra->regs + TKEOSC);
}
static int tegra186_timer_osc_init(struct tegra186_timer *tegra)
{
tegra->osc.name = "osc";
tegra->osc.rating = 300;
tegra->osc.read = tegra186_timer_osc_read;
tegra->osc.mask = CLOCKSOURCE_MASK(32);
tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
return clocksource_register_hz(&tegra->osc, 38400000);
}
static u64 tegra186_timer_usec_read(struct clocksource *cs)
{
struct tegra186_timer *tegra = container_of(cs, struct tegra186_timer,
usec);
return readl_relaxed(tegra->regs + TKEUSEC);
}
static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
{
tegra->usec.name = "usec";
tegra->usec.rating = 300;
tegra->usec.read = tegra186_timer_usec_read;
tegra->usec.mask = CLOCKSOURCE_MASK(32);
tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS;
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
static irqreturn_t tegra186_timer_irq(int irq, void *data)
{
struct tegra186_timer *tegra = data;
if (watchdog_active(&tegra->wdt->base)) {
tegra186_wdt_disable(tegra->wdt);
tegra186_wdt_enable(tegra->wdt);
}
return IRQ_HANDLED;
}
static int tegra186_timer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra186_timer *tegra;
unsigned int irq;
int err;
tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
tegra->soc = of_device_get_match_data(dev);
dev_set_drvdata(dev, tegra);
tegra->dev = dev;
tegra->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
err = platform_get_irq(pdev, 0);
if (err < 0)
return err;
irq = err;
/* create a watchdog using a preconfigured timer */
tegra->wdt = tegra186_wdt_create(tegra, 0);
if (IS_ERR(tegra->wdt)) {
err = PTR_ERR(tegra->wdt);
dev_err(dev, "failed to create WDT: %d\n", err);
return err;
}
err = tegra186_timer_tsc_init(tegra);
if (err < 0) {
dev_err(dev, "failed to register TSC counter: %d\n", err);
return err;
}
err = tegra186_timer_osc_init(tegra);
if (err < 0) {
dev_err(dev, "failed to register OSC counter: %d\n", err);
goto unregister_tsc;
}
err = tegra186_timer_usec_init(tegra);
if (err < 0) {
dev_err(dev, "failed to register USEC counter: %d\n", err);
goto unregister_osc;
}
err = devm_request_irq(dev, irq, tegra186_timer_irq, 0,
"tegra186-timer", tegra);
if (err < 0) {
dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err);
goto unregister_usec;
}
return 0;
unregister_usec:
clocksource_unregister(&tegra->usec);
unregister_osc:
clocksource_unregister(&tegra->osc);
unregister_tsc:
clocksource_unregister(&tegra->tsc);
return err;
}
static void tegra186_timer_remove(struct platform_device *pdev)
{
struct tegra186_timer *tegra = platform_get_drvdata(pdev);
clocksource_unregister(&tegra->usec);
clocksource_unregister(&tegra->osc);
clocksource_unregister(&tegra->tsc);
}
static int __maybe_unused tegra186_timer_suspend(struct device *dev)
{
struct tegra186_timer *tegra = dev_get_drvdata(dev);
if (watchdog_active(&tegra->wdt->base))
tegra186_wdt_disable(tegra->wdt);
return 0;
}
static int __maybe_unused tegra186_timer_resume(struct device *dev)
{
struct tegra186_timer *tegra = dev_get_drvdata(dev);
if (watchdog_active(&tegra->wdt->base))
tegra186_wdt_enable(tegra->wdt);
return 0;
}
static SIMPLE_DEV_PM_OPS(tegra186_timer_pm_ops, tegra186_timer_suspend,
tegra186_timer_resume);
static const struct tegra186_timer_soc tegra186_timer = {
.num_timers = 10,
.num_wdts = 3,
};
static const struct tegra186_timer_soc tegra234_timer = {
.num_timers = 16,
.num_wdts = 3,
};
static const struct of_device_id tegra186_timer_of_match[] = {
{ .compatible = "nvidia,tegra186-timer", .data = &tegra186_timer },
{ .compatible = "nvidia,tegra234-timer", .data = &tegra234_timer },
{ }
};
MODULE_DEVICE_TABLE(of, tegra186_timer_of_match);
static struct platform_driver tegra186_wdt_driver = {
.driver = {
.name = "tegra186-timer",
.pm = &tegra186_timer_pm_ops,
.of_match_table = tegra186_timer_of_match,
},
.probe = tegra186_timer_probe,
.remove_new = tegra186_timer_remove,
};
module_platform_driver(tegra186_wdt_driver);
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra186 timers driver");
|
linux-master
|
drivers/clocksource/timer-tegra186.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2015 Numascale AS. All rights reserved.
*/
#include <linux/clockchips.h>
#include <asm/irq.h>
#include <asm/numachip/numachip.h>
#include <asm/numachip/numachip_csr.h>
static DEFINE_PER_CPU(struct clock_event_device, numachip2_ced);
static cycles_t numachip2_timer_read(struct clocksource *cs)
{
return numachip2_read64_lcsr(NUMACHIP2_TIMER_NOW);
}
static struct clocksource numachip2_clocksource = {
.name = "numachip2",
.rating = 295,
.read = numachip2_timer_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
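/* Registered at NSEC_PER_SEC below, so cycles map 1:1 to nanoseconds. */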
.mult = 1,
.shift = 0,
};
static int numachip2_set_next_event(unsigned long delta, struct clock_event_device *ced)
{
numachip2_write64_lcsr(NUMACHIP2_TIMER_DEADLINE + numachip2_timer(),
delta);
return 0;
}
static const struct clock_event_device numachip2_clockevent __initconst = {
.name = "numachip2",
.rating = 400,
.set_next_event = numachip2_set_next_event,
.features = CLOCK_EVT_FEAT_ONESHOT,
.mult = 1,
.shift = 0,
.min_delta_ns = 1250,
.min_delta_ticks = 1250,
.max_delta_ns = LONG_MAX,
.max_delta_ticks = LONG_MAX,
};
static void numachip_timer_interrupt(void)
{
struct clock_event_device *ced = this_cpu_ptr(&numachip2_ced);
ced->event_handler(ced);
}
static __init void numachip_timer_each(struct work_struct *work)
{
unsigned local_apicid = __this_cpu_read(x86_cpu_to_apicid) & 0xff;
struct clock_event_device *ced = this_cpu_ptr(&numachip2_ced);
/* Setup IPI vector to local core and relative timing mode */
numachip2_write64_lcsr(NUMACHIP2_TIMER_INT + numachip2_timer(),
(3 << 22) | (X86_PLATFORM_IPI_VECTOR << 14) |
(local_apicid << 6));
*ced = numachip2_clockevent;
ced->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(ced);
}
static int __init numachip_timer_init(void)
{
if (numachip_system != 2)
return -ENODEV;
/* Reset timer */
numachip2_write64_lcsr(NUMACHIP2_TIMER_RESET, 0);
clocksource_register_hz(&numachip2_clocksource, NSEC_PER_SEC);
/* Setup per-cpu clockevents */
x86_platform_ipi_callback = numachip_timer_interrupt;
schedule_on_each_cpu(&numachip_timer_each);
return 0;
}
arch_initcall(numachip_timer_init);
|
linux-master
|
drivers/clocksource/numachip.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Mattias Wallin <[email protected]> for ST-Ericsson
* Author: Sundar Iyer for ST-Ericsson
* sched_clock implementation is based on:
* plat-nomadik/timer.c Linus Walleij <[email protected]>
*
* DBx500-PRCMU Timer
* The PRCMU has 5 timers which are available in an always-on
* power domain. We use Timer 4 as our always-on clock
* source on DB8500.
*/
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#define RATE_32K 32768
#define TIMER_MODE_CONTINUOUS 0x1
#define TIMER_DOWNCOUNT_VAL 0xffffffff
#define PRCMU_TIMER_REF 0
#define PRCMU_TIMER_DOWNCOUNT 0x4
#define PRCMU_TIMER_MODE 0x8
static void __iomem *clksrc_dbx500_timer_base;
static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
{
void __iomem *base = clksrc_dbx500_timer_base;
u32 count, count2;
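/*
 * Re-read until two consecutive samples match, guarding against a read
 * racing with a counter update.
 */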
do {
count = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT);
count2 = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT);
} while (count2 != count);
/* Invert the value because the timer is a decrementing counter */
return ~count;
}
static struct clocksource clocksource_dbx500_prcmu = {
.name = "dbx500-prcmu-timer",
.rating = 100,
.read = clksrc_dbx500_prcmu_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
{
clksrc_dbx500_timer_base = of_iomap(node, 0);
/*
* The A9 subsystem expects the timer to be configured as
* a continuously looping timer.
* The PRCMU should configure it, but if for some reason
* it doesn't, we do it here.
*/
if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
TIMER_MODE_CONTINUOUS) {
writel(TIMER_MODE_CONTINUOUS,
clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
writel(TIMER_DOWNCOUNT_VAL,
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
TIMER_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
clksrc_dbx500_prcmu_init);
|
linux-master
|
drivers/clocksource/clksrc-dbx500-prcmu.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/arm/mach-vt8500/timer.c
*
* Copyright (C) 2012 Tony Prisk <[email protected]>
* Copyright (C) 2010 Alexey Charkov <[email protected]>
*/
/*
* This file is copied and modified from the original timer.c provided by
* Alexey Charkov. Minor changes have been made for Device Tree Support.
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define VT8500_TIMER_OFFSET 0x0100
#define VT8500_TIMER_HZ 3000000
#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0010
#define TIMER_STATUS_VAL 0x0014
#define TIMER_IER_VAL 0x001c /* interrupt enable */
#define TIMER_CTRL_VAL 0x0020
#define TIMER_AS_VAL 0x0024 /* access status */
#define TIMER_COUNT_R_ACTIVE (1 << 5) /* not ready for read */
#define TIMER_COUNT_W_ACTIVE (1 << 4) /* not ready for write */
#define TIMER_MATCH_W_ACTIVE (1 << 0) /* not ready for write */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define MIN_OSCR_DELTA 16
static void __iomem *regbase;
static u64 vt8500_timer_read(struct clocksource *cs)
{
int loops = msecs_to_loops(10);
writel(3, regbase + TIMER_CTRL_VAL);
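/*
 * Wait (bounded, so a stuck status bit cannot hang the read) until the
 * hardware reports the count register ready to be read.
 */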
while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
&& --loops)
cpu_relax();
return readl(regbase + TIMER_COUNT_VAL);
}
static struct clocksource clocksource = {
.name = "vt8500_timer",
.rating = 200,
.read = vt8500_timer_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int vt8500_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
int loops = msecs_to_loops(10);
u64 alarm = clocksource.read(&clocksource) + cycles;
while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
&& --loops)
cpu_relax();
writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
return -ETIME;
writel(1, regbase + TIMER_IER_VAL);
return 0;
}
static int vt8500_shutdown(struct clock_event_device *evt)
{
writel(readl(regbase + TIMER_CTRL_VAL) | 1, regbase + TIMER_CTRL_VAL);
writel(0, regbase + TIMER_IER_VAL);
return 0;
}
static struct clock_event_device clockevent = {
.name = "vt8500_timer",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_next_event = vt8500_timer_set_next_event,
.set_state_shutdown = vt8500_shutdown,
.set_state_oneshot = vt8500_shutdown,
};
static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
writel(0xf, regbase + TIMER_STATUS_VAL);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init vt8500_timer_init(struct device_node *np)
{
int timer_irq, ret;
regbase = of_iomap(np, 0);
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
return -ENXIO;
}
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
return -EINVAL;
}
writel(1, regbase + TIMER_CTRL_VAL);
writel(0xf, regbase + TIMER_STATUS_VAL);
writel(~0, regbase + TIMER_MATCH_VAL);
ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
if (ret) {
pr_err("%s: clocksource_register failed for %s\n",
__func__, clocksource.name);
return ret;
}
clockevent.cpumask = cpumask_of(0);
ret = request_irq(timer_irq, vt8500_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "vt8500_timer",
&clockevent);
if (ret) {
pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name);
return ret;
}
clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
MIN_OSCR_DELTA * 2, 0xf0000000);
return 0;
}
TIMER_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
|
linux-master
|
drivers/clocksource/timer-vt8500.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Orion SoC timer handling.
*
* Sebastian Hesselbarth <[email protected]>
*
* Timer 0 is used as free-running clocksource, while timer 1 is
* used as clock_event_device.
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/sched_clock.h>
#define TIMER_CTRL 0x00
#define TIMER0_EN BIT(0)
#define TIMER0_RELOAD_EN BIT(1)
#define TIMER1_EN BIT(2)
#define TIMER1_RELOAD_EN BIT(3)
#define TIMER0_RELOAD 0x10
#define TIMER0_VAL 0x14
#define TIMER1_RELOAD 0x18
#define TIMER1_VAL 0x1c
#define ORION_ONESHOT_MIN 1
#define ORION_ONESHOT_MAX 0xfffffffe
static void __iomem *timer_base;
static unsigned long notrace orion_read_timer(void)
{
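/* TIMER0 counts down; return the complement so the value increases. */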
return ~readl(timer_base + TIMER0_VAL);
}
static struct delay_timer orion_delay_timer = {
.read_current_timer = orion_read_timer,
};
static void orion_delay_timer_init(unsigned long rate)
{
orion_delay_timer.freq = rate;
register_current_timer_delay(&orion_delay_timer);
}
/*
* Free-running clocksource handling.
*/
static u64 notrace orion_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL);
}
/*
* Clockevent handling.
*/
static u32 ticks_per_jiffy;
static int orion_clkevt_next_event(unsigned long delta,
struct clock_event_device *dev)
{
/* setup and enable one-shot timer */
writel(delta, timer_base + TIMER1_VAL);
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN);
return 0;
}
static int orion_clkevt_shutdown(struct clock_event_device *dev)
{
/* disable timer */
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER1_RELOAD_EN | TIMER1_EN, 0);
return 0;
}
static int orion_clkevt_set_periodic(struct clock_event_device *dev)
{
/* setup and enable periodic timer at 1/HZ intervals */
writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER1_RELOAD_EN | TIMER1_EN,
TIMER1_RELOAD_EN | TIMER1_EN);
return 0;
}
static struct clock_event_device orion_clkevt = {
.name = "orion_event",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC,
.shift = 32,
.rating = 300,
.set_next_event = orion_clkevt_next_event,
.set_state_shutdown = orion_clkevt_shutdown,
.set_state_periodic = orion_clkevt_set_periodic,
.set_state_oneshot = orion_clkevt_shutdown,
.tick_resume = orion_clkevt_shutdown,
};
static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id)
{
orion_clkevt.event_handler(&orion_clkevt);
return IRQ_HANDLED;
}
static int __init orion_timer_init(struct device_node *np)
{
unsigned long rate;
struct clk *clk;
int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Failed to prepare clock\n");
return ret;
}
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
pr_err("%pOFn: unable to parse timer1 irq\n", np);
ret = -EINVAL;
goto out_unprep_clk;
}
rate = clk_get_rate(clk);
/* setup timer0 as free-running clocksource */
writel(~0, timer_base + TIMER0_VAL);
writel(~0, timer_base + TIMER0_RELOAD);
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER0_RELOAD_EN | TIMER0_EN,
TIMER0_RELOAD_EN | TIMER0_EN);
ret = clocksource_mmio_init(timer_base + TIMER0_VAL,
"orion_clocksource", rate, 300, 32,
clocksource_mmio_readl_down);
if (ret) {
pr_err("Failed to initialize mmio timer\n");
goto out_unprep_clk;
}
sched_clock_register(orion_read_sched_clock, 32, rate);
/* setup timer1 as clockevent timer */
ret = request_irq(irq, orion_clkevt_irq_handler, IRQF_TIMER,
"orion_event", NULL);
if (ret) {
pr_err("%pOFn: unable to setup irq\n", np);
goto out_unprep_clk;
}
ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
orion_clkevt.cpumask = cpumask_of(0);
orion_clkevt.irq = irq;
clockevents_config_and_register(&orion_clkevt, rate,
ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
orion_delay_timer_init(rate);
return 0;
out_unprep_clk:
clk_disable_unprepare(clk);
return ret;
}
TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
|
linux-master
|
drivers/clocksource/timer-orion.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include "timer-of.h"
#define CLKSRC_OFFSET 0x40
#define TIMER_STATUS 0x00
#define TIMER_VALUE 0x04
#define TIMER_CONTRL 0x10
#define TIMER_CONFIG 0x20
#define TIMER_DIV 0x24
#define TIMER_INI 0x28
#define GX6605S_STATUS_CLR BIT(0)
#define GX6605S_CONTRL_RST BIT(0)
#define GX6605S_CONTRL_START BIT(1)
#define GX6605S_CONFIG_EN BIT(0)
#define GX6605S_CONFIG_IRQ_EN BIT(1)
static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev)
{
struct clock_event_device *ce = dev;
void __iomem *base = timer_of_base(to_timer_of(ce));
writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS);
writel_relaxed(0, base + TIMER_INI);
ce->event_handler(ce);
return IRQ_HANDLED;
}
static int gx6605s_timer_set_oneshot(struct clock_event_device *ce)
{
void __iomem *base = timer_of_base(to_timer_of(ce));
/* reset and stop counter */
writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL);
/* enable with irq and start */
writel_relaxed(GX6605S_CONFIG_EN | GX6605S_CONFIG_IRQ_EN,
base + TIMER_CONFIG);
return 0;
}
static int gx6605s_timer_set_next_event(unsigned long delta,
struct clock_event_device *ce)
{
void __iomem *base = timer_of_base(to_timer_of(ce));
/* use reset to pause timer */
writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL);
/* config next timeout value */
writel_relaxed(ULONG_MAX - delta, base + TIMER_INI);
writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL);
return 0;
}
static int gx6605s_timer_shutdown(struct clock_event_device *ce)
{
void __iomem *base = timer_of_base(to_timer_of(ce));
writel_relaxed(0, base + TIMER_CONTRL);
writel_relaxed(0, base + TIMER_CONFIG);
return 0;
}
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.rating = 300,
.features = CLOCK_EVT_FEAT_DYNIRQ |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = gx6605s_timer_shutdown,
.set_state_oneshot = gx6605s_timer_set_oneshot,
.set_next_event = gx6605s_timer_set_next_event,
.cpumask = cpu_possible_mask,
},
.of_irq = {
.handler = gx6605s_timer_interrupt,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
};
static u64 notrace gx6605s_sched_clock_read(void)
{
void __iomem *base;
base = timer_of_base(&to) + CLKSRC_OFFSET;
return (u64)readl_relaxed(base + TIMER_VALUE);
}
static void gx6605s_clkevt_init(void __iomem *base)
{
writel_relaxed(0, base + TIMER_DIV);
writel_relaxed(0, base + TIMER_CONFIG);
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 2,
ULONG_MAX);
}
static int gx6605s_clksrc_init(void __iomem *base)
{
writel_relaxed(0, base + TIMER_DIV);
writel_relaxed(0, base + TIMER_INI);
writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL);
writel_relaxed(GX6605S_CONFIG_EN, base + TIMER_CONFIG);
writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL);
sched_clock_register(gx6605s_sched_clock_read, 32, timer_of_rate(&to));
return clocksource_mmio_init(base + TIMER_VALUE, "gx6605s",
timer_of_rate(&to), 200, 32, clocksource_mmio_readl_up);
}
static int __init gx6605s_timer_init(struct device_node *np)
{
int ret;
/*
* This driver is for the NationalChip gx6605s SoC, which contains two
* identical timers. We use one for the clock event device and the other
* for the clock source.
*
* The timers are accessed through MMIO, so the base address must be
* provided in the device tree.
*
* Each timer is a 32-bit count-up counter that raises an interrupt on
* overflow, so set-next-event programs ULONG_MAX - delta into the
* TIMER_INI register.
*
* The counter at offset 0x0 is the clock event.
* The counter at offset 0x40 is the clock source.
* They are identical in hardware; the driver just uses them differently.
*/
ret = timer_of_init(np, &to);
if (ret)
return ret;
gx6605s_clkevt_init(timer_of_base(&to));
return gx6605s_clksrc_init(timer_of_base(&to) + CLKSRC_OFFSET);
}
TIMER_OF_DECLARE(csky_gx6605s_timer, "csky,gx6605s-timer", gx6605s_timer_init);
|
linux-master
|
drivers/clocksource/timer-gx6605s.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* MStar timer driver
*
* Copyright (C) 2021 Daniel Palmer
* Copyright (C) 2021 Romain Perier
*
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#ifdef CONFIG_ARM
#include <linux/delay.h>
#endif
#include "timer-of.h"
#define TIMER_NAME "msc313e_timer"
#define MSC313E_REG_CTRL 0x00
#define MSC313E_REG_CTRL_TIMER_EN BIT(0)
#define MSC313E_REG_CTRL_TIMER_TRIG BIT(1)
#define MSC313E_REG_CTRL_TIMER_INT_EN BIT(8)
#define MSC313E_REG_TIMER_MAX_LOW 0x08
#define MSC313E_REG_TIMER_MAX_HIGH 0x0c
#define MSC313E_REG_COUNTER_LOW 0x10
#define MSC313E_REG_COUNTER_HIGH 0x14
#define MSC313E_REG_TIMER_DIVIDE 0x18
#define MSC313E_CLK_DIVIDER 9
#define TIMER_SYNC_TICKS 3
#ifdef CONFIG_ARM
struct msc313e_delay {
void __iomem *base;
struct delay_timer delay;
};
static struct msc313e_delay msc313e_delay;
#endif
static void __iomem *msc313e_clksrc;
static void msc313e_timer_stop(void __iomem *base)
{
writew(0, base + MSC313E_REG_CTRL);
}
static void msc313e_timer_start(void __iomem *base, bool periodic)
{
u16 reg;
reg = readw(base + MSC313E_REG_CTRL);
if (periodic)
reg |= MSC313E_REG_CTRL_TIMER_EN;
else
reg |= MSC313E_REG_CTRL_TIMER_TRIG;
writew(reg | MSC313E_REG_CTRL_TIMER_INT_EN, base + MSC313E_REG_CTRL);
}
static void msc313e_timer_setup(void __iomem *base, unsigned long delay)
{
unsigned long flags;
local_irq_save(flags);
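/* Program the 32-bit timeout as two 16-bit halves; interrupts are disabled so the pair of writes is not split. */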
writew(delay >> 16, base + MSC313E_REG_TIMER_MAX_HIGH);
writew(delay & 0xffff, base + MSC313E_REG_TIMER_MAX_LOW);
local_irq_restore(flags);
}
static unsigned long msc313e_timer_current_value(void __iomem *base)
{
unsigned long flags;
u16 l, h;
local_irq_save(flags);
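/* Read the 16-bit counter halves and combine them into a 32-bit value. */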
l = readw(base + MSC313E_REG_COUNTER_LOW);
h = readw(base + MSC313E_REG_COUNTER_HIGH);
local_irq_restore(flags);
return (((u32)h) << 16 | l);
}
static int msc313e_timer_clkevt_shutdown(struct clock_event_device *evt)
{
struct timer_of *timer = to_timer_of(evt);
msc313e_timer_stop(timer_of_base(timer));
return 0;
}
static int msc313e_timer_clkevt_set_oneshot(struct clock_event_device *evt)
{
struct timer_of *timer = to_timer_of(evt);
msc313e_timer_stop(timer_of_base(timer));
msc313e_timer_start(timer_of_base(timer), false);
return 0;
}
static int msc313e_timer_clkevt_set_periodic(struct clock_event_device *evt)
{
struct timer_of *timer = to_timer_of(evt);
msc313e_timer_stop(timer_of_base(timer));
msc313e_timer_setup(timer_of_base(timer), timer_of_period(timer));
msc313e_timer_start(timer_of_base(timer), true);
return 0;
}
static int msc313e_timer_clkevt_next_event(unsigned long evt, struct clock_event_device *clkevt)
{
struct timer_of *timer = to_timer_of(clkevt);
msc313e_timer_stop(timer_of_base(timer));
msc313e_timer_setup(timer_of_base(timer), evt);
msc313e_timer_start(timer_of_base(timer), false);
return 0;
}
static irqreturn_t msc313e_timer_clkevt_irq(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static u64 msc313e_timer_clksrc_read(struct clocksource *cs)
{
return msc313e_timer_current_value(msc313e_clksrc) & cs->mask;
}
#ifdef CONFIG_ARM
static unsigned long msc313e_read_delay_timer_read(void)
{
return msc313e_timer_current_value(msc313e_delay.base);
}
#endif
static u64 msc313e_timer_sched_clock_read(void)
{
return msc313e_timer_current_value(msc313e_clksrc);
}
static struct clock_event_device msc313e_clkevt = {
.name = TIMER_NAME,
.rating = 300,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = msc313e_timer_clkevt_shutdown,
.set_state_periodic = msc313e_timer_clkevt_set_periodic,
.set_state_oneshot = msc313e_timer_clkevt_set_oneshot,
.tick_resume = msc313e_timer_clkevt_shutdown,
.set_next_event = msc313e_timer_clkevt_next_event,
};
static int __init msc313e_clkevt_init(struct device_node *np)
{
int ret;
struct timer_of *to;
to = kzalloc(sizeof(*to), GFP_KERNEL);
if (!to)
return -ENOMEM;
to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
to->of_irq.handler = msc313e_timer_clkevt_irq;
ret = timer_of_init(np, to);
if (ret)
return ret;
if (of_device_is_compatible(np, "sstar,ssd20xd-timer")) {
to->of_clk.rate = clk_get_rate(to->of_clk.clk) / MSC313E_CLK_DIVIDER;
to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
writew(MSC313E_CLK_DIVIDER - 1, timer_of_base(to) + MSC313E_REG_TIMER_DIVIDE);
}
msc313e_clkevt.cpumask = cpu_possible_mask;
msc313e_clkevt.irq = to->of_irq.irq;
to->clkevt = msc313e_clkevt;
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
TIMER_SYNC_TICKS, 0xffffffff);
return 0;
}
static int __init msc313e_clksrc_init(struct device_node *np)
{
struct timer_of to = { 0 };
int ret;
u16 reg;
to.flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
ret = timer_of_init(np, &to);
if (ret)
return ret;
msc313e_clksrc = timer_of_base(&to);
reg = readw(msc313e_clksrc + MSC313E_REG_CTRL);
reg |= MSC313E_REG_CTRL_TIMER_EN;
writew(reg, msc313e_clksrc + MSC313E_REG_CTRL);
#ifdef CONFIG_ARM
msc313e_delay.base = timer_of_base(&to);
msc313e_delay.delay.read_current_timer = msc313e_read_delay_timer_read;
msc313e_delay.delay.freq = timer_of_rate(&to);
register_current_timer_delay(&msc313e_delay.delay);
#endif
sched_clock_register(msc313e_timer_sched_clock_read, 32, timer_of_rate(&to));
return clocksource_mmio_init(timer_of_base(&to), TIMER_NAME, timer_of_rate(&to), 300, 32,
msc313e_timer_clksrc_read);
}
static int __init msc313e_timer_init(struct device_node *np)
{
int ret = 0;
static int num_called;
switch (num_called) {
case 0:
ret = msc313e_clksrc_init(np);
if (ret)
return ret;
break;
default:
ret = msc313e_clkevt_init(np);
if (ret)
return ret;
break;
}
num_called++;
return 0;
}
TIMER_OF_DECLARE(msc313, "mstar,msc313e-timer", msc313e_timer_init);
TIMER_OF_DECLARE(ssd20xd, "sstar,ssd20xd-timer", msc313e_timer_init);
|
linux-master
|
drivers/clocksource/timer-msc313e.c
|
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2000-2001 Deep Blue Solutions
// Copyright (C) 2002 Shane Nay ([email protected])
// Copyright (C) 2006-2007 Pavel Pisa ([email protected])
// Copyright (C) 2008 Juergen Beisert ([email protected])
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
/*
* There are 4 versions of the timer hardware on Freescale MXC SoCs:
* - MX1/MXL
* - MX21, MX27.
* - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
* - MX6DL, MX6SX, MX6Q(rev1.1+)
*/
enum imx_gpt_type {
GPT_TYPE_IMX1, /* i.MX1 */
GPT_TYPE_IMX21, /* i.MX21/27 */
GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */
GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */
};
/* defines common for all i.MX */
#define MXC_TCTL 0x00
#define MXC_TCTL_TEN (1 << 0) /* Enable module */
#define MXC_TPRER 0x04
/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1 (1 << 1)
#define MX1_2_TCTL_IRQEN (1 << 4)
#define MX1_2_TCTL_FRR (1 << 8)
#define MX1_2_TCMP 0x08
#define MX1_2_TCN 0x10
#define MX1_2_TSTAT 0x14
/* MX21, MX27 */
#define MX2_TSTAT_CAPT (1 << 1)
#define MX2_TSTAT_COMP (1 << 0)
/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG (1 << 6)
#define V2_TCTL_CLK_PER (2 << 6)
#define V2_TCTL_CLK_OSC_DIV8 (5 << 6)
#define V2_TCTL_FRR (1 << 9)
#define V2_TCTL_24MEN (1 << 10)
#define V2_TPRER_PRE24M 12
#define V2_IR 0x0c
#define V2_TSTAT 0x08
#define V2_TSTAT_OF1 (1 << 0)
#define V2_TCN 0x24
#define V2_TCMP 0x10
#define V2_TIMER_RATE_OSC_DIV8 3000000
struct imx_timer {
enum imx_gpt_type type;
void __iomem *base;
int irq;
struct clk *clk_per;
struct clk *clk_ipg;
const struct imx_gpt_data *gpt;
struct clock_event_device ced;
};
struct imx_gpt_data {
int reg_tstat;
int reg_tcn;
int reg_tcmp;
void (*gpt_setup_tctl)(struct imx_timer *imxtm);
void (*gpt_irq_enable)(struct imx_timer *imxtm);
void (*gpt_irq_disable)(struct imx_timer *imxtm);
void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
int (*set_next_event)(unsigned long evt,
struct clock_event_device *ced);
};
static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
return container_of(ced, struct imx_timer, ced);
}
static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
unsigned int tmp;
tmp = readl_relaxed(imxtm->base + MXC_TCTL);
writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
writel_relaxed(0, imxtm->base + V2_IR);
}
static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
unsigned int tmp;
tmp = readl_relaxed(imxtm->base + MXC_TCTL);
writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
writel_relaxed(1<<0, imxtm->base + V2_IR);
}
static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}
static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
imxtm->base + MX1_2_TSTAT);
}
static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
static void __iomem *sched_clock_reg;
static u64 notrace mxc_read_sched_clock(void)
{
return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}
#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;
static unsigned long imx_read_current_timer(void)
{
return readl_relaxed(sched_clock_reg);
}
#endif
static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
unsigned int c = clk_get_rate(imxtm->clk_per);
void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;
#if defined(CONFIG_ARM)
imx_delay_timer.read_current_timer = &imx_read_current_timer;
imx_delay_timer.freq = c;
register_current_timer_delay(&imx_delay_timer);
#endif
sched_clock_reg = reg;
sched_clock_register(mxc_read_sched_clock, 32, c);
return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
clocksource_mmio_readl_up);
}
/* clock event */
static int mx1_2_set_next_event(unsigned long evt,
struct clock_event_device *ced)
{
struct imx_timer *imxtm = to_imx_timer(ced);
unsigned long tcmp;
tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;
writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);
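/* Return -ETIME if the counter has already passed the compare value, so the core can retry with a larger delta. */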
return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
-ETIME : 0;
}
static int v2_set_next_event(unsigned long evt,
struct clock_event_device *ced)
{
struct imx_timer *imxtm = to_imx_timer(ced);
unsigned long tcmp;
tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;
writel_relaxed(tcmp, imxtm->base + V2_TCMP);
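/* Report -ETIME only for deltas below 0x7fffffff, where the signed "already passed" check below is meaningful. */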
return evt < 0x7fffffff &&
(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
-ETIME : 0;
}
static int mxc_shutdown(struct clock_event_device *ced)
{
struct imx_timer *imxtm = to_imx_timer(ced);
u32 tcn;
/* Disable interrupt in GPT module */
imxtm->gpt->gpt_irq_disable(imxtm);
tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
/* Set event time into far-far future */
writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);
/* Clear pending interrupt */
imxtm->gpt->gpt_irq_acknowledge(imxtm);
#ifdef DEBUG
printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */
return 0;
}
static int mxc_set_oneshot(struct clock_event_device *ced)
{
struct imx_timer *imxtm = to_imx_timer(ced);
/* Disable interrupt in GPT module */
imxtm->gpt->gpt_irq_disable(imxtm);
if (!clockevent_state_oneshot(ced)) {
u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
/* Set event time into far-far future */
writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);
/* Clear pending interrupt */
imxtm->gpt->gpt_irq_acknowledge(imxtm);
}
#ifdef DEBUG
printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */
/*
* Do not put overhead of interrupt enable/disable into
* mxc_set_next_event(), the core has about 4 minutes
* to call mxc_set_next_event() or shutdown clock after
* mode switching
*/
imxtm->gpt->gpt_irq_enable(imxtm);
return 0;
}
/*
* IRQ handler for the timer
*/
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ced = dev_id;
struct imx_timer *imxtm = to_imx_timer(ced);
uint32_t tstat;
tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
imxtm->gpt->gpt_irq_acknowledge(imxtm);
ced->event_handler(ced);
return IRQ_HANDLED;
}
static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
struct clock_event_device *ced = &imxtm->ced;
ced->name = "mxc_timer1";
ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
ced->set_state_shutdown = mxc_shutdown;
ced->set_state_oneshot = mxc_set_oneshot;
ced->tick_resume = mxc_shutdown;
ced->set_next_event = imxtm->gpt->set_next_event;
ced->rating = 200;
ced->cpumask = cpumask_of(0);
ced->irq = imxtm->irq;
clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
0xff, 0xfffffffe);
return request_irq(imxtm->irq, mxc_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced);
}
static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
u32 tctl_val;
tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
u32 tctl_val;
tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
tctl_val |= V2_TCTL_CLK_OSC_DIV8;
else
tctl_val |= V2_TCTL_CLK_PER;
writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
u32 tctl_val;
tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
tctl_val |= V2_TCTL_CLK_OSC_DIV8;
/* prescale the 24 MHz oscillator by 8 (PRE24M = 7): 24 / 8 = 3 MHz */
writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
tctl_val |= V2_TCTL_24MEN;
} else {
tctl_val |= V2_TCTL_CLK_PER;
}
writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
static const struct imx_gpt_data imx1_gpt_data = {
.reg_tstat = MX1_2_TSTAT,
.reg_tcn = MX1_2_TCN,
.reg_tcmp = MX1_2_TCMP,
.gpt_irq_enable = imx1_gpt_irq_enable,
.gpt_irq_disable = imx1_gpt_irq_disable,
.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
.gpt_setup_tctl = imx1_gpt_setup_tctl,
.set_next_event = mx1_2_set_next_event,
};
static const struct imx_gpt_data imx21_gpt_data = {
.reg_tstat = MX1_2_TSTAT,
.reg_tcn = MX1_2_TCN,
.reg_tcmp = MX1_2_TCMP,
.gpt_irq_enable = imx1_gpt_irq_enable,
.gpt_irq_disable = imx1_gpt_irq_disable,
.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
.gpt_setup_tctl = imx1_gpt_setup_tctl,
.set_next_event = mx1_2_set_next_event,
};
static const struct imx_gpt_data imx31_gpt_data = {
.reg_tstat = V2_TSTAT,
.reg_tcn = V2_TCN,
.reg_tcmp = V2_TCMP,
.gpt_irq_enable = imx31_gpt_irq_enable,
.gpt_irq_disable = imx31_gpt_irq_disable,
.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
.gpt_setup_tctl = imx31_gpt_setup_tctl,
.set_next_event = v2_set_next_event,
};
static const struct imx_gpt_data imx6dl_gpt_data = {
.reg_tstat = V2_TSTAT,
.reg_tcn = V2_TCN,
.reg_tcmp = V2_TCMP,
.gpt_irq_enable = imx31_gpt_irq_enable,
.gpt_irq_disable = imx31_gpt_irq_disable,
.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
.set_next_event = v2_set_next_event,
};
static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
int ret;
switch (imxtm->type) {
case GPT_TYPE_IMX1:
imxtm->gpt = &imx1_gpt_data;
break;
case GPT_TYPE_IMX21:
imxtm->gpt = &imx21_gpt_data;
break;
case GPT_TYPE_IMX31:
imxtm->gpt = &imx31_gpt_data;
break;
case GPT_TYPE_IMX6DL:
imxtm->gpt = &imx6dl_gpt_data;
break;
default:
return -EINVAL;
}
if (IS_ERR(imxtm->clk_per)) {
pr_err("i.MX timer: unable to get clk\n");
return PTR_ERR(imxtm->clk_per);
}
if (!IS_ERR(imxtm->clk_ipg))
clk_prepare_enable(imxtm->clk_ipg);
clk_prepare_enable(imxtm->clk_per);
/*
* Initialise to a known state (all timers off, and timing reset)
*/
writel_relaxed(0, imxtm->base + MXC_TCTL);
writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */
imxtm->gpt->gpt_setup_tctl(imxtm);
/* init and register the timer to the framework */
ret = mxc_clocksource_init(imxtm);
if (ret)
return ret;
return mxc_clockevent_init(imxtm);
}
static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
struct imx_timer *imxtm;
static int initialized;
int ret;
/* Support one instance only */
if (initialized)
return 0;
imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
if (!imxtm)
return -ENOMEM;
imxtm->base = of_iomap(np, 0);
if (!imxtm->base)
return -ENXIO;
imxtm->irq = irq_of_parse_and_map(np, 0);
if (imxtm->irq <= 0)
return -EINVAL;
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
/* Try osc_per first, and fall back to per otherwise */
imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
if (IS_ERR(imxtm->clk_per))
imxtm->clk_per = of_clk_get_by_name(np, "per");
imxtm->type = type;
ret = _mxc_timer_init(imxtm);
if (ret)
return ret;
initialized = 1;
return 0;
}
static int __init imx1_timer_init_dt(struct device_node *np)
{
return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}
static int __init imx21_timer_init_dt(struct device_node *np)
{
return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}
static int __init imx31_timer_init_dt(struct device_node *np)
{
enum imx_gpt_type type = GPT_TYPE_IMX31;
/*
* We were using the same compatible string for the i.MX6Q/D and
* i.MX6DL/S GPT devices, even though they actually have different
* programming models. This is a workaround that keeps existing
* i.MX6DL/S DTBs working with the new kernel.
*/
if (of_machine_is_compatible("fsl,imx6dl"))
type = GPT_TYPE_IMX6DL;
return mxc_timer_init_dt(np, type);
}
static int __init imx6dl_timer_init_dt(struct device_node *np)
{
return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}
TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);
|
linux-master
|
drivers/clocksource/timer-imx-gpt.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* samsung - Common hr-timer support (s3c and s5p)
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
#include <clocksource/samsung_pwm.h>
/*
* Clocksource driver
*/
#define REG_TCFG0 0x00
#define REG_TCFG1 0x04
#define REG_TCON 0x08
#define REG_TINT_CSTAT 0x44
#define REG_TCNTB(chan) (0x0c + 12 * (chan))
#define REG_TCMPB(chan) (0x10 + 12 * (chan))
#define TCFG0_PRESCALER_MASK 0xff
#define TCFG0_PRESCALER1_SHIFT 8
#define TCFG1_SHIFT(x) ((x) * 4)
#define TCFG1_MUX_MASK 0xf
/*
* Each channel occupies 4 bits in TCON register, but there is a gap of 4
* bits (one channel) after channel 0, so channels have different numbering
* when accessing TCON register.
*
* In addition, the location of autoreload bit for channel 4 (TCON channel 5)
* in its set of bits is 2 as opposed to 3 for other channels.
*/
#define TCON_START(chan) (1 << (4 * (chan) + 0))
#define TCON_MANUALUPDATE(chan) (1 << (4 * (chan) + 1))
#define TCON_INVERT(chan) (1 << (4 * (chan) + 2))
#define _TCON_AUTORELOAD(chan) (1 << (4 * (chan) + 3))
#define _TCON_AUTORELOAD4(chan) (1 << (4 * (chan) + 2))
#define TCON_AUTORELOAD(chan) \
((chan < 5) ? _TCON_AUTORELOAD(chan) : _TCON_AUTORELOAD4(chan))
DEFINE_SPINLOCK(samsung_pwm_lock);
EXPORT_SYMBOL(samsung_pwm_lock);
struct samsung_pwm_clocksource {
void __iomem *base;
const void __iomem *source_reg;
unsigned int irq[SAMSUNG_PWM_NUM];
struct samsung_pwm_variant variant;
struct clk *timerclk;
unsigned int event_id;
unsigned int source_id;
unsigned int tcnt_max;
unsigned int tscaler_div;
unsigned int tdiv;
unsigned long clock_count_per_tick;
};
static struct samsung_pwm_clocksource pwm;
static void samsung_timer_set_prescale(unsigned int channel, u16 prescale)
{
unsigned long flags;
u8 shift = 0;
u32 reg;
if (channel >= 2)
shift = TCFG0_PRESCALER1_SHIFT;
spin_lock_irqsave(&samsung_pwm_lock, flags);
reg = readl(pwm.base + REG_TCFG0);
reg &= ~(TCFG0_PRESCALER_MASK << shift);
reg |= (prescale - 1) << shift;
writel(reg, pwm.base + REG_TCFG0);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
static void samsung_timer_set_divisor(unsigned int channel, u8 divisor)
{
u8 shift = TCFG1_SHIFT(channel);
unsigned long flags;
u32 reg;
u8 bits;
bits = (fls(divisor) - 1) - pwm.variant.div_base;
spin_lock_irqsave(&samsung_pwm_lock, flags);
reg = readl(pwm.base + REG_TCFG1);
reg &= ~(TCFG1_MUX_MASK << shift);
reg |= bits << shift;
writel(reg, pwm.base + REG_TCFG1);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
static void samsung_time_stop(unsigned int channel)
{
unsigned long tcon;
unsigned long flags;
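/* TCON bits for channels 1-4 are shifted up by one slot (4-bit gap after channel 0); see the comment above TCON_START(). */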
if (channel > 0)
++channel;
spin_lock_irqsave(&samsung_pwm_lock, flags);
tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_START(channel);
writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
{
unsigned long tcon;
unsigned long flags;
unsigned int tcon_chan = channel;
if (tcon_chan > 0)
++tcon_chan;
spin_lock_irqsave(&samsung_pwm_lock, flags);
tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
tcon |= TCON_MANUALUPDATE(tcon_chan);
writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel));
writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel));
writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
static void samsung_time_start(unsigned int channel, bool periodic)
{
unsigned long tcon;
unsigned long flags;
if (channel > 0)
++channel;
spin_lock_irqsave(&samsung_pwm_lock, flags);
tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_MANUALUPDATE(channel);
tcon |= TCON_START(channel);
if (periodic)
tcon |= TCON_AUTORELOAD(channel);
else
tcon &= ~TCON_AUTORELOAD(channel);
writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
static int samsung_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
/*
* This check is needed to account for internal rounding
* errors inside clockevents core, which might result in
* passing cycles = 0, which in turn would not generate any
* timer interrupt and hang the system.
*
* Another solution would be to set up the clockevent device
* with min_delta = 2, but this would unnecessarily increase
* the minimum sleep period.
*/
if (!cycles)
cycles = 1;
samsung_time_setup(pwm.event_id, cycles);
samsung_time_start(pwm.event_id, false);
return 0;
}
static int samsung_shutdown(struct clock_event_device *evt)
{
samsung_time_stop(pwm.event_id);
return 0;
}
static int samsung_set_periodic(struct clock_event_device *evt)
{
samsung_time_stop(pwm.event_id);
samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
samsung_time_start(pwm.event_id, true);
return 0;
}
static void samsung_clockevent_resume(struct clock_event_device *cev)
{
samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div);
samsung_timer_set_divisor(pwm.event_id, pwm.tdiv);
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
}
static struct clock_event_device time_event_device = {
.name = "samsung_event_timer",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_next_event = samsung_set_next_event,
.set_state_shutdown = samsung_shutdown,
.set_state_periodic = samsung_set_periodic,
.set_state_oneshot = samsung_shutdown,
.tick_resume = samsung_shutdown,
.resume = samsung_clockevent_resume,
};
static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void __init samsung_clockevent_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
unsigned int irq_number;
pclk = clk_get_rate(pwm.timerclk);
samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div);
samsung_timer_set_divisor(pwm.event_id, pwm.tdiv);
clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv);
pwm.clock_count_per_tick = clock_rate / HZ;
time_event_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&time_event_device,
clock_rate, 1, pwm.tcnt_max);
irq_number = pwm.irq[pwm.event_id];
if (request_irq(irq_number, samsung_clock_event_isr,
IRQF_TIMER | IRQF_IRQPOLL, "samsung_time_irq",
&time_event_device))
pr_err("%s: request_irq() failed\n", "samsung_time_irq");
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
}
static void samsung_clocksource_suspend(struct clocksource *cs)
{
samsung_time_stop(pwm.source_id);
}
static void samsung_clocksource_resume(struct clocksource *cs)
{
samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div);
samsung_timer_set_divisor(pwm.source_id, pwm.tdiv);
samsung_time_setup(pwm.source_id, pwm.tcnt_max);
samsung_time_start(pwm.source_id, true);
}
static u64 notrace samsung_clocksource_read(struct clocksource *c)
{
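/* The PWM timer counts down, so invert the value to present an up-counting clocksource. */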
return ~readl_relaxed(pwm.source_reg);
}
static struct clocksource samsung_clocksource = {
.name = "samsung_clocksource_timer",
.rating = 250,
.read = samsung_clocksource_read,
.suspend = samsung_clocksource_suspend,
.resume = samsung_clocksource_resume,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel. We accept that
* this wraps around for now, since it is just a relative time
* stamp. (Inspired by U300 implementation.)
*/
static u64 notrace samsung_read_sched_clock(void)
{
return samsung_clocksource_read(NULL);
}
static int __init samsung_clocksource_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
pclk = clk_get_rate(pwm.timerclk);
samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div);
samsung_timer_set_divisor(pwm.source_id, pwm.tdiv);
clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv);
samsung_time_setup(pwm.source_id, pwm.tcnt_max);
samsung_time_start(pwm.source_id, true);
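/* Channel 4 has its observation register at a fixed offset, presumably because it lacks a TCMPB register; channels 0-3 follow the regular 0x0c stride. */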
if (pwm.source_id == 4)
pwm.source_reg = pwm.base + 0x40;
else
pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
sched_clock_register(samsung_read_sched_clock,
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
return clocksource_register_hz(&samsung_clocksource, clock_rate);
}
static void __init samsung_timer_resources(void)
{
clk_prepare_enable(pwm.timerclk);
pwm.tcnt_max = (1UL << pwm.variant.bits) - 1;
if (pwm.variant.bits == 16) {
pwm.tscaler_div = 25;
pwm.tdiv = 2;
} else {
pwm.tscaler_div = 2;
pwm.tdiv = 1;
}
}
/*
* PWM master driver
*/
static int __init _samsung_pwm_clocksource_init(void)
{
u8 mask;
int channel;
mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
channel = fls(mask) - 1;
if (channel < 0) {
pr_crit("failed to find PWM channel for clocksource\n");
return -EINVAL;
}
pwm.source_id = channel;
mask &= ~(1 << channel);
channel = fls(mask) - 1;
if (channel < 0) {
pr_crit("failed to find PWM channel for clock event\n");
return -EINVAL;
}
pwm.event_id = channel;
samsung_timer_resources();
samsung_clockevent_init();
return samsung_clocksource_init();
}
void __init samsung_pwm_clocksource_init(void __iomem *base,
unsigned int *irqs,
const struct samsung_pwm_variant *variant)
{
pwm.base = base;
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs));
pwm.timerclk = clk_get(NULL, "timers");
if (IS_ERR(pwm.timerclk))
panic("failed to get timers clock for timer");
_samsung_pwm_clocksource_init();
}
#ifdef CONFIG_TIMER_OF
static int __init samsung_pwm_alloc(struct device_node *np,
const struct samsung_pwm_variant *variant)
{
struct property *prop;
const __be32 *cur;
u32 val;
int i, ret;
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
for (i = 0; i < SAMSUNG_PWM_NUM; ++i)
pwm.irq[i] = irq_of_parse_and_map(np, i);
of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
if (val >= SAMSUNG_PWM_NUM) {
pr_warn("%s: invalid channel index in samsung,pwm-outputs property\n", __func__);
continue;
}
pwm.variant.output_mask |= 1 << val;
}
pwm.base = of_iomap(np, 0);
if (!pwm.base) {
pr_err("%s: failed to map PWM registers\n", __func__);
return -ENXIO;
}
pwm.timerclk = of_clk_get_by_name(np, "timers");
if (IS_ERR(pwm.timerclk)) {
pr_crit("failed to get timers clock for timer\n");
ret = PTR_ERR(pwm.timerclk);
goto err_clk;
}
ret = _samsung_pwm_clocksource_init();
if (ret)
goto err_clocksource;
return 0;
err_clocksource:
clk_put(pwm.timerclk);
pwm.timerclk = NULL;
err_clk:
iounmap(pwm.base);
pwm.base = NULL;
return ret;
}
static const struct samsung_pwm_variant s3c24xx_variant = {
.bits = 16,
.div_base = 1,
.has_tint_cstat = false,
.tclk_mask = (1 << 4),
};
static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
{
return samsung_pwm_alloc(np, &s3c24xx_variant);
}
TIMER_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
static const struct samsung_pwm_variant s3c64xx_variant = {
.bits = 32,
.div_base = 0,
.has_tint_cstat = true,
.tclk_mask = (1 << 7) | (1 << 6) | (1 << 5),
};
static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
{
return samsung_pwm_alloc(np, &s3c64xx_variant);
}
TIMER_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
static const struct samsung_pwm_variant s5p64x0_variant = {
.bits = 32,
.div_base = 0,
.has_tint_cstat = true,
.tclk_mask = 0,
};
static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
{
return samsung_pwm_alloc(np, &s5p64x0_variant);
}
TIMER_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
static const struct samsung_pwm_variant s5p_variant = {
.bits = 32,
.div_base = 0,
.has_tint_cstat = true,
.tclk_mask = (1 << 5),
};
static int __init s5p_pwm_clocksource_init(struct device_node *np)
{
return samsung_pwm_alloc(np, &s5p_variant);
}
TIMER_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
#endif
|
linux-master
|
drivers/clocksource/samsung_pwm_timer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Maxime Coquelin 2015
* Author: Maxime Coquelin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/bitops.h>
#define SYST_CSR 0x00
#define SYST_RVR 0x04
#define SYST_CVR 0x08
#define SYST_CALIB 0x0c
#define SYST_CSR_ENABLE BIT(0)
#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
static int __init system_timer_of_register(struct device_node *np)
{
struct clk *clk = NULL;
void __iomem *base;
u32 rate;
int ret;
base = of_iomap(np, 0);
if (!base) {
pr_warn("system-timer: invalid base address\n");
return -ENXIO;
}
ret = of_property_read_u32(np, "clock-frequency", &rate);
if (ret) {
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto out_unmap;
}
ret = clk_prepare_enable(clk);
if (ret)
goto out_clk_put;
rate = clk_get_rate(clk);
if (!rate) {
ret = -EINVAL;
goto out_clk_disable;
}
}
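/* Load the maximum 24-bit reload value and start SysTick; it free-runs as a down-counting clocksource, no interrupt is used. */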
writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
writel_relaxed(SYST_CSR_ENABLE, base + SYST_CSR);
ret = clocksource_mmio_init(base + SYST_CVR, "arm_system_timer", rate,
200, 24, clocksource_mmio_readl_down);
if (ret) {
pr_err("failed to init clocksource (%d)\n", ret);
if (clk)
goto out_clk_disable;
else
goto out_unmap;
}
pr_info("ARM System timer initialized as clocksource\n");
return 0;
out_clk_disable:
clk_disable_unprepare(clk);
out_clk_put:
clk_put(clk);
out_unmap:
iounmap(base);
pr_warn("ARM System timer register failed (%d)\n", ret);
return ret;
}
TIMER_OF_DECLARE(arm_systick, "arm,armv7m-systick",
system_timer_of_register);
|
linux-master
|
drivers/clocksource/armv7m_systick.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* J-Core SoC PIT/clocksource driver
*
* Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define PIT_IRQ_SHIFT 12
#define PIT_PRIO_SHIFT 20
#define PIT_ENABLE_SHIFT 26
#define PIT_PRIO_MASK 0xf
#define REG_PITEN 0x00
#define REG_THROT 0x10
#define REG_COUNT 0x14
#define REG_BUSPD 0x18
#define REG_SECHI 0x20
#define REG_SECLO 0x24
#define REG_NSEC 0x28
struct jcore_pit {
struct clock_event_device ced;
void __iomem *base;
unsigned long periodic_delta;
u32 enable_val;
};
static void __iomem *jcore_pit_base;
static struct jcore_pit __percpu *jcore_pit_percpu;
static notrace u64 jcore_sched_clock_read(void)
{
u32 seclo, nsec, seclo0;
__iomem void *base = jcore_pit_base;
seclo = readl(base + REG_SECLO);
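/* Re-read SECLO until it is stable so a seconds rollover during the NSEC read cannot produce a torn timestamp. */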
do {
seclo0 = seclo;
nsec = readl(base + REG_NSEC);
seclo = readl(base + REG_SECLO);
} while (seclo0 != seclo);
return seclo * NSEC_PER_SEC + nsec;
}
static u64 jcore_clocksource_read(struct clocksource *cs)
{
return jcore_sched_clock_read();
}
static int jcore_pit_disable(struct jcore_pit *pit)
{
writel(0, pit->base + REG_PITEN);
return 0;
}
static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
{
jcore_pit_disable(pit);
writel(delta, pit->base + REG_THROT);
writel(pit->enable_val, pit->base + REG_PITEN);
return 0;
}
static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
{
struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
return jcore_pit_disable(pit);
}
static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
{
struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
return jcore_pit_disable(pit);
}
static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
{
struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
return jcore_pit_set(pit->periodic_delta, pit);
}
static int jcore_pit_set_next_event(unsigned long delta,
struct clock_event_device *ced)
{
struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
return jcore_pit_set(delta, pit);
}
static int jcore_pit_local_init(unsigned cpu)
{
struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
unsigned buspd, freq;
pr_info("Local J-Core PIT init on cpu %u\n", cpu);
buspd = readl(pit->base + REG_BUSPD);
freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
return 0;
}
static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
{
struct jcore_pit *pit = this_cpu_ptr(dev_id);
if (clockevent_state_oneshot(&pit->ced))
jcore_pit_disable(pit);
pit->ced.event_handler(&pit->ced);
return IRQ_HANDLED;
}
static int __init jcore_pit_init(struct device_node *node)
{
int err;
unsigned pit_irq, cpu;
unsigned long hwirq;
u32 irqprio, enable_val;
jcore_pit_base = of_iomap(node, 0);
if (!jcore_pit_base) {
pr_err("Error: Cannot map base address for J-Core PIT\n");
return -ENXIO;
}
pit_irq = irq_of_parse_and_map(node, 0);
if (!pit_irq) {
pr_err("Error: J-Core PIT has no IRQ\n");
return -ENXIO;
}
pr_info("Initializing J-Core PIT at %p IRQ %d\n",
jcore_pit_base, pit_irq);
err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
NSEC_PER_SEC, 400, 32,
jcore_clocksource_read);
if (err) {
pr_err("Error registering clocksource device: %d\n", err);
return err;
}
sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
jcore_pit_percpu = alloc_percpu(struct jcore_pit);
if (!jcore_pit_percpu) {
pr_err("Failed to allocate memory for clock event device\n");
return -ENOMEM;
}
err = request_irq(pit_irq, jcore_timer_interrupt,
IRQF_TIMER | IRQF_PERCPU,
"jcore_pit", jcore_pit_percpu);
if (err) {
pr_err("pit irq request failed: %d\n", err);
free_percpu(jcore_pit_percpu);
return err;
}
/*
* The J-Core PIT is not hard-wired to a particular IRQ, but
* integrated with the interrupt controller such that the IRQ it
* generates is programmable, as follows:
*
* The bit layout of the PIT enable register is:
*
* .....e..ppppiiiiiiii............
*
* where the .'s indicate unrelated/unused bits, e is enable,
* p is priority, and i is hard irq number.
*
* For the PIT included in AIC1 (obsolete but still in use),
* any hard irq (trap number) can be programmed via the 8
* iiiiiiii bits, and a priority (0-15) is programmable
* separately in the pppp bits.
*
* For the PIT included in AIC2 (current), the programming
* interface is equivalent modulo interrupt mapping. This is
* why a different compatible tag was not used. However only
* traps 64-127 (the ones actually intended to be used for
* interrupts, rather than syscalls/exceptions/etc.) can be
* programmed (the high 2 bits of i are ignored) and the
* priority pppp is <<2'd and or'd onto the irq number. This
* choice seems to have been made on the hardware engineering
* side under an assumption that preserving old AIC1 priority
* mappings was important. Future models will likely ignore
* the pppp field.
*/
hwirq = irq_get_irq_data(pit_irq)->hwirq;
irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
enable_val = (1U << PIT_ENABLE_SHIFT)
| (hwirq << PIT_IRQ_SHIFT)
| (irqprio << PIT_PRIO_SHIFT);
for_each_present_cpu(cpu) {
struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
pit->base = of_iomap(node, cpu);
if (!pit->base) {
pr_err("Unable to map PIT for cpu %u\n", cpu);
continue;
}
pit->ced.name = "jcore_pit";
pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT
| CLOCK_EVT_FEAT_PERCPU;
pit->ced.cpumask = cpumask_of(cpu);
pit->ced.rating = 400;
pit->ced.irq = pit_irq;
pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
pit->ced.set_next_event = jcore_pit_set_next_event;
pit->enable_val = enable_val;
}
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"clockevents/jcore:starting",
jcore_pit_local_init, NULL);
return 0;
}
TIMER_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
|
linux-master
|
drivers/clocksource/jcore-pit.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Timer Support - OSTM
*
* Copyright (C) 2017 Renesas Electronics America, Inc.
* Copyright (C) 2017 Chris Brandt
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include "timer-of.h"
/*
* The OSTM contains independent channels.
* The first OSTM channel probed will be set up as a free running
* clocksource. This clocksource is also used for the scheduler clock,
* sched_clock().
*
* The second (or more) channel probed will be set up as an interrupt
* driven clock event.
*/
static void __iomem *system_clock; /* For sched_clock() */
/* OSTM REGISTERS */
#define OSTM_CMP 0x000 /* RW,32 */
#define OSTM_CNT 0x004 /* R,32 */
#define OSTM_TE 0x010 /* R,8 */
#define OSTM_TS 0x014 /* W,8 */
#define OSTM_TT 0x018 /* W,8 */
#define OSTM_CTL 0x020 /* RW,8 */
#define TE 0x01
#define TS 0x01
#define TT 0x01
#define CTL_PERIODIC 0x00
#define CTL_ONESHOT 0x02
#define CTL_FREERUN 0x02
static void ostm_timer_stop(struct timer_of *to)
{
if (readb(timer_of_base(to) + OSTM_TE) & TE) {
writeb(TT, timer_of_base(to) + OSTM_TT);
/*
* Read back the register simply to confirm the write operation
* has completed since I/O writes can sometimes get queued by
* the bus architecture.
*/
while (readb(timer_of_base(to) + OSTM_TE) & TE)
;
}
}
static int __init ostm_init_clksrc(struct timer_of *to)
{
ostm_timer_stop(to);
writel(0, timer_of_base(to) + OSTM_CMP);
writeb(CTL_FREERUN, timer_of_base(to) + OSTM_CTL);
writeb(TS, timer_of_base(to) + OSTM_TS);
return clocksource_mmio_init(timer_of_base(to) + OSTM_CNT,
to->np->full_name, timer_of_rate(to), 300,
32, clocksource_mmio_readl_up);
}
static u64 notrace ostm_read_sched_clock(void)
{
return readl(system_clock);
}
static void __init ostm_init_sched_clock(struct timer_of *to)
{
system_clock = timer_of_base(to) + OSTM_CNT;
sched_clock_register(ostm_read_sched_clock, 32, timer_of_rate(to));
}
static int ostm_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct timer_of *to = to_timer_of(ced);
ostm_timer_stop(to);
writel(delta, timer_of_base(to) + OSTM_CMP);
writeb(CTL_ONESHOT, timer_of_base(to) + OSTM_CTL);
writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_shutdown(struct clock_event_device *ced)
{
struct timer_of *to = to_timer_of(ced);
ostm_timer_stop(to);
return 0;
}
static int ostm_set_periodic(struct clock_event_device *ced)
{
struct timer_of *to = to_timer_of(ced);
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
ostm_timer_stop(to);
writel(timer_of_period(to) - 1, timer_of_base(to) + OSTM_CMP);
writeb(CTL_PERIODIC, timer_of_base(to) + OSTM_CTL);
writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_set_oneshot(struct clock_event_device *ced)
{
struct timer_of *to = to_timer_of(ced);
ostm_timer_stop(to);
return 0;
}
static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ced = dev_id;
if (clockevent_state_oneshot(ced))
ostm_timer_stop(to_timer_of(ced));
/* notify clockevent layer */
if (ced->event_handler)
ced->event_handler(ced);
return IRQ_HANDLED;
}
static int __init ostm_init_clkevt(struct timer_of *to)
{
struct clock_event_device *ced = &to->clkevt;
ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
ced->set_state_shutdown = ostm_shutdown;
ced->set_state_periodic = ostm_set_periodic;
ced->set_state_oneshot = ostm_set_oneshot;
ced->set_next_event = ostm_clock_event_next;
ced->shift = 32;
ced->rating = 300;
ced->cpumask = cpumask_of(0);
clockevents_config_and_register(ced, timer_of_rate(to), 0xf,
0xffffffff);
return 0;
}
static int __init ostm_init(struct device_node *np)
{
struct reset_control *rstc;
struct timer_of *to;
int ret;
to = kzalloc(sizeof(*to), GFP_KERNEL);
if (!to)
return -ENOMEM;
rstc = of_reset_control_get_optional_exclusive(np, NULL);
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
goto err_free;
}
reset_control_deassert(rstc);
to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
if (system_clock) {
/*
* clock sources don't use interrupts, clock events do
*/
to->flags |= TIMER_OF_IRQ;
to->of_irq.flags = IRQF_TIMER | IRQF_IRQPOLL;
to->of_irq.handler = ostm_timer_interrupt;
}
ret = timer_of_init(np, to);
if (ret)
goto err_reset;
/*
* First probed device will be used as system clocksource. Any
* additional devices will be used as clock events.
*/
if (!system_clock) {
ret = ostm_init_clksrc(to);
if (ret)
goto err_cleanup;
ostm_init_sched_clock(to);
pr_info("%pOF: used for clocksource\n", np);
} else {
ret = ostm_init_clkevt(to);
if (ret)
goto err_cleanup;
pr_info("%pOF: used for clock events\n", np);
}
return 0;
err_cleanup:
timer_of_cleanup(to);
err_reset:
reset_control_assert(rstc);
reset_control_put(rstc);
err_free:
kfree(to);
return ret;
}
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
#ifdef CONFIG_ARCH_RZG2L
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
return ostm_init(dev->of_node);
}
static const struct of_device_id ostm_of_table[] = {
{ .compatible = "renesas,ostm", },
{ /* sentinel */ }
};
static struct platform_driver ostm_device_driver = {
.driver = {
.name = "renesas_ostm",
.of_match_table = of_match_ptr(ostm_of_table),
.suppress_bind_attrs = true,
},
};
builtin_platform_driver_probe(ostm_device_driver, ostm_probe);
#endif
|
linux-master
|
drivers/clocksource/renesas-ostm.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI DaVinci clocksource driver
*
* Copyright (C) 2019 Texas Instruments
* Author: Bartosz Golaszewski <[email protected]>
* (with tiny parts adopted from code by Kevin Hilman <[email protected]>)
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <clocksource/timer-davinci.h>
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
#define DAVINCI_TIMER_REG_PRD12 0x18
#define DAVINCI_TIMER_REG_PRD34 0x1c
#define DAVINCI_TIMER_REG_TCR 0x20
#define DAVINCI_TIMER_REG_TGCR 0x24
#define DAVINCI_TIMER_TIMMODE_MASK GENMASK(3, 2)
#define DAVINCI_TIMER_RESET_MASK GENMASK(1, 0)
#define DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED BIT(2)
#define DAVINCI_TIMER_UNRESET GENMASK(1, 0)
#define DAVINCI_TIMER_ENAMODE_MASK GENMASK(1, 0)
#define DAVINCI_TIMER_ENAMODE_DISABLED 0x00
#define DAVINCI_TIMER_ENAMODE_ONESHOT BIT(0)
#define DAVINCI_TIMER_ENAMODE_PERIODIC BIT(1)
#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM12 6
#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM34 22
#define DAVINCI_TIMER_MIN_DELTA 0x01
#define DAVINCI_TIMER_MAX_DELTA 0xfffffffe
#define DAVINCI_TIMER_CLKSRC_BITS 32
#define DAVINCI_TIMER_TGCR_DEFAULT \
(DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED | DAVINCI_TIMER_UNRESET)
struct davinci_clockevent {
struct clock_event_device dev;
void __iomem *base;
unsigned int cmp_off;
};
/*
* This must be globally accessible by davinci_timer_read_sched_clock(), so
* let's keep it here.
*/
static struct {
struct clocksource dev;
void __iomem *base;
unsigned int tim_off;
} davinci_clocksource;
static struct davinci_clockevent *
to_davinci_clockevent(struct clock_event_device *clockevent)
{
return container_of(clockevent, struct davinci_clockevent, dev);
}
static unsigned int
davinci_clockevent_read(struct davinci_clockevent *clockevent,
unsigned int reg)
{
return readl_relaxed(clockevent->base + reg);
}
static void davinci_clockevent_write(struct davinci_clockevent *clockevent,
unsigned int reg, unsigned int val)
{
writel_relaxed(val, clockevent->base + reg);
}
static void davinci_tim12_shutdown(void __iomem *base)
{
unsigned int tcr;
tcr = DAVINCI_TIMER_ENAMODE_DISABLED <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
/*
* This function is only ever called if we're using both timer
* halves. In this case TIM34 runs in periodic mode and we must
* not modify it.
*/
tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
static void davinci_tim12_set_oneshot(void __iomem *base)
{
unsigned int tcr;
tcr = DAVINCI_TIMER_ENAMODE_ONESHOT <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
/* Same as above. */
tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
static int davinci_clockevent_shutdown(struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent;
clockevent = to_davinci_clockevent(dev);
davinci_tim12_shutdown(clockevent->base);
return 0;
}
static int davinci_clockevent_set_oneshot(struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
davinci_tim12_set_oneshot(clockevent->base);
return 0;
}
static int
davinci_clockevent_set_next_event_std(unsigned long cycles,
struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
davinci_clockevent_shutdown(dev);
davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_PRD12, cycles);
davinci_clockevent_set_oneshot(dev);
return 0;
}
static int
davinci_clockevent_set_next_event_cmp(unsigned long cycles,
struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
unsigned int curr_time;
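/* In compare mode the timer free-runs, so program the compare register relative to the current count. */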
curr_time = davinci_clockevent_read(clockevent,
DAVINCI_TIMER_REG_TIM12);
davinci_clockevent_write(clockevent,
clockevent->cmp_off, curr_time + cycles);
return 0;
}
static irqreturn_t davinci_timer_irq_timer(int irq, void *data)
{
struct davinci_clockevent *clockevent = data;
if (!clockevent_state_oneshot(&clockevent->dev))
davinci_tim12_shutdown(clockevent->base);
clockevent->dev.event_handler(&clockevent->dev);
return IRQ_HANDLED;
}
static u64 notrace davinci_timer_read_sched_clock(void)
{
return readl_relaxed(davinci_clocksource.base +
davinci_clocksource.tim_off);
}
static u64 davinci_clocksource_read(struct clocksource *dev)
{
return davinci_timer_read_sched_clock();
}
/*
* Standard use-case: we're using tim12 for clockevent and tim34 for
* clocksource. The default is making the former run in oneshot mode
* and the latter in periodic mode.
*/
static void davinci_clocksource_init_tim34(void __iomem *base)
{
int tcr;
tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
tcr |= DAVINCI_TIMER_ENAMODE_ONESHOT <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD34);
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
/*
* Special use-case on da830: the DSP may use tim34. We're using tim12 for
* both clocksource and clockevent. We set tim12 to periodic and don't touch
* tim34.
*/
static void davinci_clocksource_init_tim12(void __iomem *base)
{
unsigned int tcr;
tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD12);
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
static void davinci_timer_init(void __iomem *base)
{
/* Set clock to internal mode and disable it. */
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TCR);
/*
* Reset both 32-bit timers, set no prescaler for timer 34, set the
* timer to dual 32-bit unchained mode, unreset both 32-bit timers.
*/
writel_relaxed(DAVINCI_TIMER_TGCR_DEFAULT,
base + DAVINCI_TIMER_REG_TGCR);
/* Init both counters to zero. */
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
}
int __init davinci_timer_register(struct clk *clk,
const struct davinci_timer_cfg *timer_cfg)
{
struct davinci_clockevent *clockevent;
unsigned int tick_rate;
void __iomem *base;
int rv;
rv = clk_prepare_enable(clk);
if (rv) {
pr_err("Unable to prepare and enable the timer clock\n");
return rv;
}
if (!request_mem_region(timer_cfg->reg.start,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
pr_err("Unable to request memory region\n");
rv = -EBUSY;
goto exit_clk_disable;
}
base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
pr_err("Unable to map the register range\n");
rv = -ENOMEM;
goto exit_mem_region;
}
davinci_timer_init(base);
tick_rate = clk_get_rate(clk);
clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
if (!clockevent) {
rv = -ENOMEM;
goto exit_iounmap_base;
}
clockevent->dev.name = "tim12";
clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
clockevent->dev.cpumask = cpumask_of(0);
clockevent->base = base;
if (timer_cfg->cmp_off) {
clockevent->cmp_off = timer_cfg->cmp_off;
clockevent->dev.set_next_event =
davinci_clockevent_set_next_event_cmp;
} else {
clockevent->dev.set_next_event =
davinci_clockevent_set_next_event_std;
clockevent->dev.set_state_oneshot =
davinci_clockevent_set_oneshot;
clockevent->dev.set_state_shutdown =
davinci_clockevent_shutdown;
}
rv = request_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
davinci_timer_irq_timer, IRQF_TIMER,
"clockevent/tim12", clockevent);
if (rv) {
pr_err("Unable to request the clockevent interrupt\n");
goto exit_free_clockevent;
}
davinci_clocksource.dev.rating = 300;
davinci_clocksource.dev.read = davinci_clocksource_read;
davinci_clocksource.dev.mask =
CLOCKSOURCE_MASK(DAVINCI_TIMER_CLKSRC_BITS);
davinci_clocksource.dev.flags = CLOCK_SOURCE_IS_CONTINUOUS;
davinci_clocksource.base = base;
if (timer_cfg->cmp_off) {
davinci_clocksource.dev.name = "tim12";
davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM12;
davinci_clocksource_init_tim12(base);
} else {
davinci_clocksource.dev.name = "tim34";
davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM34;
davinci_clocksource_init_tim34(base);
}
clockevents_config_and_register(&clockevent->dev, tick_rate,
DAVINCI_TIMER_MIN_DELTA,
DAVINCI_TIMER_MAX_DELTA);
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
pr_err("Unable to register clocksource\n");
goto exit_free_irq;
}
sched_clock_register(davinci_timer_read_sched_clock,
DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
return 0;
exit_free_irq:
free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
clockevent);
exit_free_clockevent:
kfree(clockevent);
exit_iounmap_base:
iounmap(base);
exit_mem_region:
release_mem_region(timer_cfg->reg.start,
resource_size(&timer_cfg->reg));
exit_clk_disable:
clk_disable_unprepare(clk);
return rv;
}
static int __init of_davinci_timer_register(struct device_node *np)
{
struct davinci_timer_cfg timer_cfg = { };
struct clk *clk;
int rv;
rv = of_address_to_resource(np, 0, &timer_cfg.reg);
if (rv) {
pr_err("Unable to get the register range for timer\n");
return rv;
}
rv = of_irq_to_resource_table(np, timer_cfg.irq,
DAVINCI_TIMER_NUM_IRQS);
if (rv != DAVINCI_TIMER_NUM_IRQS) {
pr_err("Unable to get the interrupts for timer\n");
return rv;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Unable to get the timer clock\n");
return PTR_ERR(clk);
}
rv = davinci_timer_register(clk, &timer_cfg);
if (rv)
clk_put(clk);
return rv;
}
TIMER_OF_DECLARE(davinci_timer, "ti,da830-timer", of_davinci_timer_register);
|
linux-master
|
drivers/clocksource/timer-davinci.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/clocksource/timer-sp.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_clk.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include "timer-sp.h"
/* Hisilicon 64-bit timer(a variant of ARM SP804) */
#define HISI_TIMER_1_BASE 0x00
#define HISI_TIMER_2_BASE 0x40
#define HISI_TIMER_LOAD 0x00
#define HISI_TIMER_LOAD_H 0x04
#define HISI_TIMER_VALUE 0x08
#define HISI_TIMER_VALUE_H 0x0c
#define HISI_TIMER_CTRL 0x10
#define HISI_TIMER_INTCLR 0x14
#define HISI_TIMER_RIS 0x18
#define HISI_TIMER_MIS 0x1c
#define HISI_TIMER_BGLOAD 0x20
#define HISI_TIMER_BGLOAD_H 0x24
static struct sp804_timer arm_sp804_timer __initdata = {
.load = TIMER_LOAD,
.value = TIMER_VALUE,
.ctrl = TIMER_CTRL,
.intclr = TIMER_INTCLR,
.timer_base = {TIMER_1_BASE, TIMER_2_BASE},
.width = 32,
};
static struct sp804_timer hisi_sp804_timer __initdata = {
.load = HISI_TIMER_LOAD,
.load_h = HISI_TIMER_LOAD_H,
.value = HISI_TIMER_VALUE,
.value_h = HISI_TIMER_VALUE_H,
.ctrl = HISI_TIMER_CTRL,
.intclr = HISI_TIMER_INTCLR,
.timer_base = {HISI_TIMER_1_BASE, HISI_TIMER_2_BASE},
.width = 64,
};
static struct sp804_clkevt sp804_clkevt[NR_TIMERS];
static long __init sp804_get_clock_rate(struct clk *clk, const char *name)
{
int err;
if (!clk)
clk = clk_get_sys("sp804", name);
if (IS_ERR(clk)) {
pr_err("%s clock not found: %ld\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
err = clk_prepare_enable(clk);
if (err) {
pr_err("clock failed to enable: %d\n", err);
clk_put(clk);
return err;
}
return clk_get_rate(clk);
}
static struct sp804_clkevt * __init sp804_clkevt_get(void __iomem *base)
{
int i;
for (i = 0; i < NR_TIMERS; i++) {
if (sp804_clkevt[i].base == base)
return &sp804_clkevt[i];
}
/* It's impossible to reach here */
WARN_ON(1);
return NULL;
}
static struct sp804_clkevt *sched_clkevt;
static u64 notrace sp804_read(void)
{
	/* The SP804 counters count down, so invert for an increasing sched_clock. */
	return ~readl_relaxed(sched_clkevt->value);
}
static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
int use_sched_clock)
{
long rate;
struct sp804_clkevt *clkevt;
rate = sp804_get_clock_rate(clk, name);
if (rate < 0)
return -EINVAL;
clkevt = sp804_clkevt_get(base);
writel(0, clkevt->ctrl);
writel(0xffffffff, clkevt->load);
writel(0xffffffff, clkevt->value);
if (clkevt->width == 64) {
writel(0xffffffff, clkevt->load_h);
writel(0xffffffff, clkevt->value_h);
}
writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
clkevt->ctrl);
clocksource_mmio_init(clkevt->value, name,
rate, 200, 32, clocksource_mmio_readl_down);
if (use_sched_clock) {
sched_clkevt = clkevt;
sched_clock_register(sp804_read, 32, rate);
}
return 0;
}
static struct sp804_clkevt *common_clkevt;
/*
* IRQ handler for the timer
*/
static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
/* clear the interrupt */
writel(1, common_clkevt->intclr);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static inline void evt_timer_shutdown(struct clock_event_device *evt)
{
writel(0, common_clkevt->ctrl);
}
static int sp804_shutdown(struct clock_event_device *evt)
{
evt_timer_shutdown(evt);
return 0;
}
static int sp804_set_periodic(struct clock_event_device *evt)
{
unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
evt_timer_shutdown(evt);
writel(common_clkevt->reload, common_clkevt->load);
writel(ctrl, common_clkevt->ctrl);
return 0;
}
static int sp804_set_next_event(unsigned long next,
struct clock_event_device *evt)
{
unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
TIMER_CTRL_ONESHOT | TIMER_CTRL_ENABLE;
writel(next, common_clkevt->load);
writel(ctrl, common_clkevt->ctrl);
return 0;
}
static struct clock_event_device sp804_clockevent = {
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = sp804_shutdown,
.set_state_periodic = sp804_set_periodic,
.set_state_oneshot = sp804_shutdown,
.tick_resume = sp804_shutdown,
.set_next_event = sp804_set_next_event,
.rating = 300,
};
static int __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
rate = sp804_get_clock_rate(clk, name);
if (rate < 0)
return -EINVAL;
common_clkevt = sp804_clkevt_get(base);
common_clkevt->reload = DIV_ROUND_CLOSEST(rate, HZ);
evt->name = name;
evt->irq = irq;
evt->cpumask = cpu_possible_mask;
writel(0, common_clkevt->ctrl);
if (request_irq(irq, sp804_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"timer", &sp804_clockevent))
pr_err("request_irq() failed\n");
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
return 0;
}
static void __init sp804_clkevt_init(struct sp804_timer *timer, void __iomem *base)
{
int i;
for (i = 0; i < NR_TIMERS; i++) {
void __iomem *timer_base;
struct sp804_clkevt *clkevt;
timer_base = base + timer->timer_base[i];
clkevt = &sp804_clkevt[i];
clkevt->base = timer_base;
clkevt->load = timer_base + timer->load;
clkevt->load_h = timer_base + timer->load_h;
clkevt->value = timer_base + timer->value;
clkevt->value_h = timer_base + timer->value_h;
clkevt->ctrl = timer_base + timer->ctrl;
clkevt->intclr = timer_base + timer->intclr;
clkevt->width = timer->width;
}
}
static int __init sp804_of_init(struct device_node *np, struct sp804_timer *timer)
{
static bool initialized = false;
void __iomem *base;
void __iomem *timer1_base;
void __iomem *timer2_base;
int irq, ret = -EINVAL;
u32 irq_num = 0;
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
if (initialized) {
pr_debug("%pOF: skipping further SP804 timer device\n", np);
return 0;
}
base = of_iomap(np, 0);
if (!base)
return -ENXIO;
timer1_base = base + timer->timer_base[0];
timer2_base = base + timer->timer_base[1];
/* Ensure timers are disabled */
writel(0, timer1_base + timer->ctrl);
writel(0, timer2_base + timer->ctrl);
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
clk1 = NULL;
/* Get the 2nd clock if the timer has 3 timer clocks */
if (of_clk_get_parent_count(np) == 3) {
clk2 = of_clk_get(np, 1);
if (IS_ERR(clk2)) {
pr_err("%pOFn clock not found: %d\n", np,
(int)PTR_ERR(clk2));
clk2 = NULL;
}
} else
clk2 = clk1;
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
sp804_clkevt_init(timer, base);
of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
if (irq_num == 2) {
ret = sp804_clockevents_init(timer2_base, irq, clk2, name);
if (ret)
goto err;
ret = sp804_clocksource_and_sched_clock_init(timer1_base,
name, clk1, 1);
if (ret)
goto err;
} else {
ret = sp804_clockevents_init(timer1_base, irq, clk1, name);
if (ret)
goto err;
ret = sp804_clocksource_and_sched_clock_init(timer2_base,
name, clk2, 1);
if (ret)
goto err;
}
initialized = true;
return 0;
err:
iounmap(base);
return ret;
}
static int __init arm_sp804_of_init(struct device_node *np)
{
return sp804_of_init(np, &arm_sp804_timer);
}
TIMER_OF_DECLARE(sp804, "arm,sp804", arm_sp804_of_init);
static int __init hisi_sp804_of_init(struct device_node *np)
{
return sp804_of_init(np, &hisi_sp804_timer);
}
TIMER_OF_DECLARE(hisi_sp804, "hisilicon,sp804", hisi_sp804_of_init);
static int __init integrator_cp_of_init(struct device_node *np)
{
static int init_count = 0;
void __iomem *base;
int irq, ret = -EINVAL;
const char *name = of_get_property(np, "compatible", NULL);
struct clk *clk;
base = of_iomap(np, 0);
if (!base) {
pr_err("Failed to iomap\n");
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clock\n");
return PTR_ERR(clk);
}
/* Ensure timer is disabled */
writel(0, base + arm_sp804_timer.ctrl);
if (init_count == 2 || !of_device_is_available(np))
goto err;
sp804_clkevt_init(&arm_sp804_timer, base);
if (!init_count) {
ret = sp804_clocksource_and_sched_clock_init(base,
name, clk, 0);
if (ret)
goto err;
} else {
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
ret = sp804_clockevents_init(base, irq, clk, name);
if (ret)
goto err;
}
init_count++;
return 0;
err:
iounmap(base);
return ret;
}
TIMER_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
| linux-master | drivers/clocksource/timer-sp804.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Actions Semi Owl timer
*
* Copyright 2012 Actions Semi Inc.
* Author: Actions Semi, Inc.
*
* Copyright (c) 2017 SUSE Linux GmbH
* Author: Andreas Färber
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define OWL_Tx_CTL 0x0
#define OWL_Tx_CMP 0x4
#define OWL_Tx_VAL 0x8
#define OWL_Tx_CTL_PD BIT(0)
#define OWL_Tx_CTL_INTEN BIT(1)
#define OWL_Tx_CTL_EN BIT(2)
static void __iomem *owl_timer_base;
static void __iomem *owl_clksrc_base;
static void __iomem *owl_clkevt_base;
static inline void owl_timer_reset(void __iomem *base)
{
writel(0, base + OWL_Tx_CTL);
writel(0, base + OWL_Tx_VAL);
writel(0, base + OWL_Tx_CMP);
}
static inline void owl_timer_set_enabled(void __iomem *base, bool enabled)
{
u32 ctl = readl(base + OWL_Tx_CTL);
/* PD bit is cleared when set */
ctl &= ~OWL_Tx_CTL_PD;
if (enabled)
ctl |= OWL_Tx_CTL_EN;
else
ctl &= ~OWL_Tx_CTL_EN;
writel(ctl, base + OWL_Tx_CTL);
}
static u64 notrace owl_timer_sched_read(void)
{
return (u64)readl(owl_clksrc_base + OWL_Tx_VAL);
}
static int owl_timer_set_state_shutdown(struct clock_event_device *evt)
{
owl_timer_set_enabled(owl_clkevt_base, false);
return 0;
}
static int owl_timer_set_state_oneshot(struct clock_event_device *evt)
{
owl_timer_reset(owl_clkevt_base);
return 0;
}
static int owl_timer_tick_resume(struct clock_event_device *evt)
{
return 0;
}
static int owl_timer_set_next_event(unsigned long evt,
struct clock_event_device *ev)
{
void __iomem *base = owl_clkevt_base;
owl_timer_set_enabled(base, false);
writel(OWL_Tx_CTL_INTEN, base + OWL_Tx_CTL);
writel(0, base + OWL_Tx_VAL);
writel(evt, base + OWL_Tx_CMP);
owl_timer_set_enabled(base, true);
return 0;
}
static struct clock_event_device owl_clockevent = {
.name = "owl_tick",
.rating = 200,
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = owl_timer_set_state_shutdown,
.set_state_oneshot = owl_timer_set_state_oneshot,
.tick_resume = owl_timer_tick_resume,
.set_next_event = owl_timer_set_next_event,
};
static irqreturn_t owl_timer1_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
writel(OWL_Tx_CTL_PD, owl_clkevt_base + OWL_Tx_CTL);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init owl_timer_init(struct device_node *node)
{
struct clk *clk;
unsigned long rate;
int timer1_irq, ret;
owl_timer_base = of_io_request_and_map(node, 0, "owl-timer");
if (IS_ERR(owl_timer_base)) {
pr_err("Can't map timer registers\n");
return PTR_ERR(owl_timer_base);
}
owl_clksrc_base = owl_timer_base + 0x08;
owl_clkevt_base = owl_timer_base + 0x14;
timer1_irq = of_irq_get_byname(node, "timer1");
if (timer1_irq <= 0) {
pr_err("Can't parse timer1 IRQ\n");
return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
pr_err("Failed to get clock for clocksource (%d)\n", ret);
return ret;
}
rate = clk_get_rate(clk);
owl_timer_reset(owl_clksrc_base);
owl_timer_set_enabled(owl_clksrc_base, true);
sched_clock_register(owl_timer_sched_read, 32, rate);
ret = clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name,
rate, 200, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("Failed to register clocksource (%d)\n", ret);
return ret;
}
owl_timer_reset(owl_clkevt_base);
ret = request_irq(timer1_irq, owl_timer1_interrupt, IRQF_TIMER,
"owl-timer", &owl_clockevent);
if (ret) {
pr_err("failed to request irq %d\n", timer1_irq);
return ret;
}
owl_clockevent.cpumask = cpumask_of(0);
owl_clockevent.irq = timer1_irq;
clockevents_config_and_register(&owl_clockevent, rate,
0xf, 0xffffffff);
return 0;
}
TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
| linux-master | drivers/clocksource/timer-owl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file contains driver for the Cadence Triple Timer Counter Rev 06
*
* Copyright (C) 2011-2013 Xilinx
*
* based on arch/mips/kernel/time.c timer driver
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
#include <linux/module.h>
#include <linux/of_platform.h>
/*
* This driver configures the 2 16/32-bit count-up timers as follows:
*
* T1: Timer 1, clocksource for generic timekeeping
* T2: Timer 2, clockevent source for hrtimers
* T3: Timer 3, <unused>
*
* The input frequency to the timer module for emulation is 2.5MHz which is
* common to all the timer channels (T1, T2, and T3). With a pre-scaler of 32,
* the timers are clocked at 78.125KHz (12.8 us resolution).
* The input frequency to the timer module in silicon is configurable and
* obtained from device tree. The pre-scaler of 32 is used.
*/
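/*
 * Worked numbers for the emulation description above (illustrative only):
 * 2.5 MHz / 32 = 78.125 kHz, i.e. one tick every 12.8 us, which is where the
 * resolution figure quoted above comes from. The silicon configuration below
 * uses the PRESCALE value defined further down instead.
 */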
/*
* Timer Register Offset Definitions of Timer 1, Increment base address by 4
* and use same offsets for Timer 2
*/
#define TTC_CLK_CNTRL_OFFSET 0x00 /* Clock Control Reg, RW */
#define TTC_CNT_CNTRL_OFFSET 0x0C /* Counter Control Reg, RW */
#define TTC_COUNT_VAL_OFFSET 0x18 /* Counter Value Reg, RO */
#define TTC_INTR_VAL_OFFSET 0x24 /* Interval Count Reg, RW */
#define TTC_ISR_OFFSET 0x54 /* Interrupt Status Reg, RO */
#define TTC_IER_OFFSET 0x60 /* Interrupt Enable Reg, RW */
#define TTC_CNT_CNTRL_DISABLE_MASK 0x1
#define TTC_CLK_CNTRL_CSRC_MASK (1 << 5) /* clock source */
#define TTC_CLK_CNTRL_PSV_MASK 0x1e
#define TTC_CLK_CNTRL_PSV_SHIFT 1
/*
* Setup the timers to use pre-scaling, using a fixed value for now that will
 * work across most input frequencies, but it may need to be more dynamic
*/
#define PRESCALE_EXPONENT 11 /* 2 ^ PRESCALE_EXPONENT = PRESCALE */
#define PRESCALE 2048 /* The exponent must match this */
#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1)
#define CLK_CNTRL_PRESCALE_EN 1
#define CNT_CNTRL_RESET (1 << 4)
#define MAX_F_ERR 50
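/*
 * Illustrative sketch, not part of the original driver: with the macros
 * above, the effective tick rate seen by the clocksource and clockevent is
 * the input clock divided by PRESCALE, and CLK_CNTRL_PRESCALE places
 * PRESCALE_EXPONENT - 1 in the prescaler field. The helper name below is
 * hypothetical and only shows the arithmetic.
 */
static inline unsigned long ttc_example_tick_rate(unsigned long input_hz)
{
	return input_hz / PRESCALE;	/* e.g. 100 MHz / 2048 ~= 48.8 kHz */
}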
/**
* struct ttc_timer - This definition defines local timer structure
*
* @base_addr: Base address of timer
* @freq: Timer input clock frequency
* @clk: Associated clock source
 * @clk_rate_change_nb:	Notifier block for clock rate changes
*/
struct ttc_timer {
void __iomem *base_addr;
unsigned long freq;
struct clk *clk;
struct notifier_block clk_rate_change_nb;
};
#define to_ttc_timer(x) \
container_of(x, struct ttc_timer, clk_rate_change_nb)
struct ttc_timer_clocksource {
u32 scale_clk_ctrl_reg_old;
u32 scale_clk_ctrl_reg_new;
struct ttc_timer ttc;
struct clocksource cs;
};
#define to_ttc_timer_clksrc(x) \
container_of(x, struct ttc_timer_clocksource, cs)
struct ttc_timer_clockevent {
struct ttc_timer ttc;
struct clock_event_device ce;
};
#define to_ttc_timer_clkevent(x) \
container_of(x, struct ttc_timer_clockevent, ce)
static void __iomem *ttc_sched_clock_val_reg;
/**
* ttc_set_interval - Set the timer interval value
*
* @timer: Pointer to the timer instance
* @cycles: Timer interval ticks
**/
static void ttc_set_interval(struct ttc_timer *timer,
unsigned long cycles)
{
u32 ctrl_reg;
/* Disable the counter, set the counter value and re-enable counter */
ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);
/*
 * Reset the counter (0x10) so that it starts from 0; one-shot
 * mode needs this for the timing to be right.
*/
ctrl_reg |= CNT_CNTRL_RESET;
ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
}
/**
* ttc_clock_event_interrupt - Clock event timer interrupt handler
*
* @irq: IRQ number of the Timer
* @dev_id: void pointer to the ttc_timer instance
*
* returns: Always IRQ_HANDLED - success
**/
static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
{
struct ttc_timer_clockevent *ttce = dev_id;
struct ttc_timer *timer = &ttce->ttc;
/* Acknowledge the interrupt and call event handler */
readl_relaxed(timer->base_addr + TTC_ISR_OFFSET);
ttce->ce.event_handler(&ttce->ce);
return IRQ_HANDLED;
}
/**
* __ttc_clocksource_read - Reads the timer counter register
*
* returns: Current timer counter register value
**/
static u64 __ttc_clocksource_read(struct clocksource *cs)
{
struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
return (u64)readl_relaxed(timer->base_addr +
TTC_COUNT_VAL_OFFSET);
}
static u64 notrace ttc_sched_clock_read(void)
{
return readl_relaxed(ttc_sched_clock_val_reg);
}
/**
* ttc_set_next_event - Sets the time interval for next event
*
* @cycles: Timer interval ticks
* @evt: Address of clock event instance
*
* returns: Always 0 - success
**/
static int ttc_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
struct ttc_timer *timer = &ttce->ttc;
ttc_set_interval(timer, cycles);
return 0;
}
/**
* ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer
*
* @evt: Address of clock event instance
**/
static int ttc_shutdown(struct clock_event_device *evt)
{
struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
struct ttc_timer *timer = &ttce->ttc;
u32 ctrl_reg;
ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
return 0;
}
static int ttc_set_periodic(struct clock_event_device *evt)
{
struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
struct ttc_timer *timer = &ttce->ttc;
ttc_set_interval(timer,
DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ));
return 0;
}
static int ttc_resume(struct clock_event_device *evt)
{
struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
struct ttc_timer *timer = &ttce->ttc;
u32 ctrl_reg;
ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
return 0;
}
static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
struct ttc_timer *ttc = to_ttc_timer(nb);
struct ttc_timer_clocksource *ttccs = container_of(ttc,
struct ttc_timer_clocksource, ttc);
switch (event) {
case PRE_RATE_CHANGE:
{
u32 psv;
unsigned long factor, rate_low, rate_high;
if (ndata->new_rate > ndata->old_rate) {
factor = DIV_ROUND_CLOSEST(ndata->new_rate,
ndata->old_rate);
rate_low = ndata->old_rate;
rate_high = ndata->new_rate;
} else {
factor = DIV_ROUND_CLOSEST(ndata->old_rate,
ndata->new_rate);
rate_low = ndata->new_rate;
rate_high = ndata->old_rate;
}
if (!is_power_of_2(factor))
return NOTIFY_BAD;
if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR)
return NOTIFY_BAD;
factor = __ilog2_u32(factor);
/*
* store timer clock ctrl register so we can restore it in case
* of an abort.
*/
ttccs->scale_clk_ctrl_reg_old =
readl_relaxed(ttccs->ttc.base_addr +
TTC_CLK_CNTRL_OFFSET);
psv = (ttccs->scale_clk_ctrl_reg_old &
TTC_CLK_CNTRL_PSV_MASK) >>
TTC_CLK_CNTRL_PSV_SHIFT;
if (ndata->new_rate < ndata->old_rate)
psv -= factor;
else
psv += factor;
/* prescaler within legal range? */
if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
return NOTIFY_BAD;
ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
~TTC_CLK_CNTRL_PSV_MASK;
ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;
/* scale down: adjust divider in post-change notification */
if (ndata->new_rate < ndata->old_rate)
return NOTIFY_DONE;
/* scale up: adjust divider now - before frequency change */
writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
break;
}
case POST_RATE_CHANGE:
/* scale up: pre-change notification did the adjustment */
if (ndata->new_rate > ndata->old_rate)
return NOTIFY_OK;
/* scale down: adjust divider now - after frequency change */
writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
break;
case ABORT_RATE_CHANGE:
/* we have to undo the adjustment in case we scale up */
if (ndata->new_rate < ndata->old_rate)
return NOTIFY_OK;
/* restore original register value */
writel_relaxed(ttccs->scale_clk_ctrl_reg_old,
ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
fallthrough;
default:
return NOTIFY_DONE;
}
return NOTIFY_DONE;
}
static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
u32 timer_width)
{
struct ttc_timer_clocksource *ttccs;
int err;
ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
if (!ttccs)
return -ENOMEM;
ttccs->ttc.clk = clk;
err = clk_prepare_enable(ttccs->ttc.clk);
if (err) {
kfree(ttccs);
return err;
}
ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
err = clk_notifier_register(ttccs->ttc.clk,
&ttccs->ttc.clk_rate_change_nb);
if (err)
pr_warn("Unable to register clock notifier.\n");
ttccs->ttc.base_addr = base;
ttccs->cs.name = "ttc_clocksource";
ttccs->cs.rating = 200;
ttccs->cs.read = __ttc_clocksource_read;
ttccs->cs.mask = CLOCKSOURCE_MASK(timer_width);
ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
/*
 * Set up the clocksource counter as an incrementing counter with no
 * interrupt, rolling over at the configured timer width and pre-scaled
 * by PRESCALE. Let it start running now.
*/
writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
writel_relaxed(CNT_CNTRL_RESET,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
if (err) {
kfree(ttccs);
return err;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
sched_clock_register(ttc_sched_clock_read, timer_width,
ttccs->ttc.freq / PRESCALE);
return 0;
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
struct ttc_timer *ttc = to_ttc_timer(nb);
struct ttc_timer_clockevent *ttcce = container_of(ttc,
struct ttc_timer_clockevent, ttc);
switch (event) {
case POST_RATE_CHANGE:
/* update cached frequency */
ttc->freq = ndata->new_rate;
clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
fallthrough;
case PRE_RATE_CHANGE:
case ABORT_RATE_CHANGE:
default:
return NOTIFY_DONE;
}
}
static int __init ttc_setup_clockevent(struct clk *clk,
void __iomem *base, u32 irq)
{
struct ttc_timer_clockevent *ttcce;
int err;
ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
if (!ttcce)
return -ENOMEM;
ttcce->ttc.clk = clk;
err = clk_prepare_enable(ttcce->ttc.clk);
if (err)
goto out_kfree;
ttcce->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clockevent_cb;
ttcce->ttc.clk_rate_change_nb.next = NULL;
err = clk_notifier_register(ttcce->ttc.clk,
&ttcce->ttc.clk_rate_change_nb);
if (err) {
pr_warn("Unable to register clock notifier.\n");
goto out_kfree;
}
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
ttcce->ce.name = "ttc_clockevent";
ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ttcce->ce.set_next_event = ttc_set_next_event;
ttcce->ce.set_state_shutdown = ttc_shutdown;
ttcce->ce.set_state_periodic = ttc_set_periodic;
ttcce->ce.set_state_oneshot = ttc_shutdown;
ttcce->ce.tick_resume = ttc_resume;
ttcce->ce.rating = 200;
ttcce->ce.irq = irq;
ttcce->ce.cpumask = cpu_possible_mask;
/*
 * Set up the clock event timer as an interval timer, pre-scaled by
 * PRESCALE and using the interval interrupt. Leave it
* disabled for now.
*/
writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
if (err)
goto out_kfree;
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
return 0;
out_kfree:
kfree(ttcce);
return err;
}
static int __init ttc_timer_probe(struct platform_device *pdev)
{
unsigned int irq;
void __iomem *timer_baseaddr;
struct clk *clk_cs, *clk_ce;
static int initialized;
int clksel, ret;
u32 timer_width = 16;
struct device_node *timer = pdev->dev.of_node;
if (initialized)
return 0;
initialized = 1;
/*
* Get the 1st Triple Timer Counter (TTC) block from the device tree
* and use it. Note that the event timer uses the interrupt and it's the
* 2nd TTC hence the irq_of_parse_and_map(,1)
*/
timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL);
if (IS_ERR(timer_baseaddr)) {
pr_err("ERROR: invalid timer base address\n");
return PTR_ERR(timer_baseaddr);
}
irq = irq_of_parse_and_map(timer, 1);
if (irq <= 0) {
pr_err("ERROR: invalid interrupt number\n");
return -EINVAL;
}
of_property_read_u32(timer, "timer-width", &timer_width);
clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);
clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
clk_cs = of_clk_get(timer, clksel);
if (IS_ERR(clk_cs)) {
pr_err("ERROR: timer input clock not found\n");
return PTR_ERR(clk_cs);
}
clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
pr_err("ERROR: timer input clock not found\n");
ret = PTR_ERR(clk_ce);
goto put_clk_cs;
}
ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
if (ret)
goto put_clk_ce;
ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
if (ret)
goto put_clk_ce;
pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
return 0;
put_clk_ce:
clk_put(clk_ce);
put_clk_cs:
clk_put(clk_cs);
return ret;
}
static const struct of_device_id ttc_timer_of_match[] = {
{.compatible = "cdns,ttc"},
{},
};
MODULE_DEVICE_TABLE(of, ttc_timer_of_match);
static struct platform_driver ttc_timer_driver = {
.driver = {
.name = "cdns_ttc_timer",
.of_match_table = ttc_timer_of_match,
},
};
builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe);
| linux-master | drivers/clocksource/timer-cadence-ttc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/clocksource/dummy_timer.c
*
* Copyright (C) 2013 ARM Ltd.
* All Rights Reserved
*/
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
static int dummy_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
evt->name = "dummy_timer";
evt->features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DUMMY;
evt->rating = 100;
evt->cpumask = cpumask_of(cpu);
clockevents_register_device(evt);
return 0;
}
static int __init dummy_timer_register(void)
{
return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
"clockevents/dummy_timer:starting",
dummy_timer_starting_cpu, NULL);
}
early_initcall(dummy_timer_register);
| linux-master | drivers/clocksource/dummy_timer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Altera Corporation
* Copyright (c) 2011 Picochip Ltd., Jamie Iles
*
* Modified from mach-picoxcell/time.c
*/
#include <linux/delay.h>
#include <linux/dw_apb_timer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/sched_clock.h>
static int __init timer_get_base_and_rate(struct device_node *np,
void __iomem **base, u32 *rate)
{
struct clk *timer_clk;
struct clk *pclk;
struct reset_control *rstc;
int ret;
*base = of_iomap(np, 0);
if (!*base)
panic("Unable to map regs for %pOFn", np);
/*
* Reset the timer if the reset control is available, wiping
* out the state the firmware may have left it
*/
rstc = of_reset_control_get(np, NULL);
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
reset_control_deassert(rstc);
}
/*
* Not all implementations use a peripheral clock, so don't panic
* if it's not present
*/
pclk = of_clk_get_by_name(np, "pclk");
if (!IS_ERR(pclk))
if (clk_prepare_enable(pclk))
pr_warn("pclk for %pOFn is present, but could not be activated\n",
np);
if (!of_property_read_u32(np, "clock-freq", rate) ||
!of_property_read_u32(np, "clock-frequency", rate))
return 0;
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
ret = PTR_ERR(timer_clk);
goto out_pclk_disable;
}
ret = clk_prepare_enable(timer_clk);
if (ret)
goto out_timer_clk_put;
*rate = clk_get_rate(timer_clk);
if (!(*rate)) {
ret = -EINVAL;
goto out_timer_clk_disable;
}
return 0;
out_timer_clk_disable:
clk_disable_unprepare(timer_clk);
out_timer_clk_put:
clk_put(timer_clk);
out_pclk_disable:
if (!IS_ERR(pclk)) {
clk_disable_unprepare(pclk);
clk_put(pclk);
}
iounmap(*base);
return ret;
}
static int __init add_clockevent(struct device_node *event_timer)
{
void __iomem *iobase;
struct dw_apb_clock_event_device *ced;
u32 irq, rate;
int ret = 0;
irq = irq_of_parse_and_map(event_timer, 0);
if (irq == 0)
panic("No IRQ for clock event timer");
ret = timer_get_base_and_rate(event_timer, &iobase, &rate);
if (ret)
return ret;
ced = dw_apb_clockevent_init(-1, event_timer->name, 300, iobase, irq,
rate);
if (!ced)
return -EINVAL;
dw_apb_clockevent_register(ced);
return 0;
}
static void __iomem *sched_io_base;
static u32 sched_rate;
static int __init add_clocksource(struct device_node *source_timer)
{
void __iomem *iobase;
struct dw_apb_clocksource *cs;
u32 rate;
int ret;
ret = timer_get_base_and_rate(source_timer, &iobase, &rate);
if (ret)
return ret;
cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate);
if (!cs)
return -EINVAL;
dw_apb_clocksource_start(cs);
dw_apb_clocksource_register(cs);
/*
* Fallback to use the clocksource as sched_clock if no separate
* timer is found. sched_io_base then points to the current_value
* register of the clocksource timer.
*/
sched_io_base = iobase + 0x04;
sched_rate = rate;
return 0;
}
static u64 notrace read_sched_clock(void)
{
	/* The APB timer counts down, so invert for an increasing sched_clock. */
	return ~readl_relaxed(sched_io_base);
}
static const struct of_device_id sptimer_ids[] __initconst = {
{ .compatible = "picochip,pc3x2-rtc" },
{ /* Sentinel */ },
};
static void __init init_sched_clock(void)
{
struct device_node *sched_timer;
sched_timer = of_find_matching_node(NULL, sptimer_ids);
if (sched_timer) {
timer_get_base_and_rate(sched_timer, &sched_io_base,
&sched_rate);
of_node_put(sched_timer);
}
sched_clock_register(read_sched_clock, 32, sched_rate);
}
#ifdef CONFIG_ARM
static unsigned long dw_apb_delay_timer_read(void)
{
return ~readl_relaxed(sched_io_base);
}
static struct delay_timer dw_apb_delay_timer = {
.read_current_timer = dw_apb_delay_timer_read,
};
#endif
static int num_called;
static int __init dw_apb_timer_init(struct device_node *timer)
{
int ret = 0;
	/*
	 * The first timer probed (num_called == 0) takes the default branch
	 * and becomes the clockevent; the second becomes the clocksource and
	 * sched_clock; any further timers are registered as extra clockevents.
	 */
	switch (num_called) {
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
ret = add_clocksource(timer);
if (ret)
return ret;
init_sched_clock();
#ifdef CONFIG_ARM
dw_apb_delay_timer.freq = sched_rate;
register_current_timer_delay(&dw_apb_delay_timer);
#endif
break;
default:
pr_debug("%s: found clockevent timer\n", __func__);
ret = add_clockevent(timer);
if (ret)
return ret;
break;
}
num_called++;
return 0;
}
TIMER_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
TIMER_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
TIMER_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
TIMER_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
| linux-master | drivers/clocksource/dw_apb_timer_of.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2017-2019 NXP
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include "timer-of.h"
#define CMP_OFFSET 0x10000
#define CNTCV_LO 0x8
#define CNTCV_HI 0xc
#define CMPCV_LO (CMP_OFFSET + 0x20)
#define CMPCV_HI (CMP_OFFSET + 0x24)
#define CMPCR (CMP_OFFSET + 0x2c)
#define SYS_CTR_EN 0x1
#define SYS_CTR_IRQ_MASK 0x2
#define SYS_CTR_CLK_DIV 0x3
static void __iomem *sys_ctr_base __ro_after_init;
static u32 cmpcr __ro_after_init;
static void sysctr_timer_enable(bool enable)
{
writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
}
static void sysctr_irq_acknowledge(void)
{
/*
	 * Clearing the enable bit (EN = 0) also clears the status bit
	 * (ISTAT = 0), which negates (acknowledges) the interrupt signal.
*/
sysctr_timer_enable(false);
}
static inline u64 sysctr_read_counter(void)
{
u32 cnt_hi, tmp_hi, cnt_lo;
	/*
	 * Read high/low/high and retry until the high word is stable, so a
	 * low-word rollover between the two 32-bit reads cannot be missed.
	 */
	do {
cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
} while (tmp_hi != cnt_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
}
static int sysctr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u32 cmp_hi, cmp_lo;
u64 next;
sysctr_timer_enable(false);
next = sysctr_read_counter();
next += delta;
cmp_hi = (next >> 32) & 0x00fffff;
cmp_lo = next & 0xffffffff;
writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
sysctr_timer_enable(true);
return 0;
}
static int sysctr_set_state_oneshot(struct clock_event_device *evt)
{
return 0;
}
static int sysctr_set_state_shutdown(struct clock_event_device *evt)
{
sysctr_timer_enable(false);
return 0;
}
static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
sysctr_irq_acknowledge();
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct timer_of to_sysctr = {
.flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
.clkevt = {
.name = "i.MX system counter timer",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ,
.set_state_oneshot = sysctr_set_state_oneshot,
.set_next_event = sysctr_set_next_event,
.set_state_shutdown = sysctr_set_state_shutdown,
.rating = 200,
},
.of_irq = {
.handler = sysctr_timer_interrupt,
.flags = IRQF_TIMER,
},
.of_clk = {
.name = "per",
},
};
static void __init sysctr_clockevent_init(void)
{
to_sysctr.clkevt.cpumask = cpu_possible_mask;
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
0xff, 0x7fffffff);
}
static int __init sysctr_timer_init(struct device_node *np)
{
int ret = 0;
ret = timer_of_init(np, &to_sysctr);
if (ret)
return ret;
if (!of_property_read_bool(np, "nxp,no-divider")) {
/* system counter clock is divided by 3 internally */
to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
}
sys_ctr_base = timer_of_base(&to_sysctr);
cmpcr = readl(sys_ctr_base + CMPCR);
cmpcr &= ~SYS_CTR_EN;
sysctr_clockevent_init();
return 0;
}
TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);
| linux-master | drivers/clocksource/timer-imx-sysctr.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2000-2001 Deep Blue Solutions
// Copyright (C) 2002 Shane Nay ([email protected])
// Copyright (C) 2006-2007 Pavel Pisa ([email protected])
// Copyright (C) 2008 Juergen Beisert ([email protected])
// Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/stmp_device.h>
#include <linux/sched_clock.h>
/*
* There are 2 versions of the timrot on Freescale MXS-based SoCs.
* The v1 on MX23 only gets 16 bits counter, while v2 on MX28
* extends the counter to 32 bits.
*
* The implementation uses two timers, one for clock_event and
* another for clocksource. MX28 uses timrot 0 and 1, while MX23
* uses 0 and 2.
*/
#define MX23_TIMROT_VERSION_OFFSET 0x0a0
#define MX28_TIMROT_VERSION_OFFSET 0x120
#define BP_TIMROT_MAJOR_VERSION 24
#define BV_TIMROT_VERSION_1 0x01
#define BV_TIMROT_VERSION_2 0x02
#define timrot_is_v1() (timrot_major_version == BV_TIMROT_VERSION_1)
/*
* There are 4 registers for each timrotv2 instance, and 2 registers
* for each timrotv1. So address step 0x40 in macros below strides
* one instance of timrotv2 while two instances of timrotv1.
*
* As the result, HW_TIMROT_XXXn(1) defines the address of timrot1
* on MX28 while timrot2 on MX23.
*/
/* common between v1 and v2 */
#define HW_TIMROT_ROTCTRL 0x00
#define HW_TIMROT_TIMCTRLn(n) (0x20 + (n) * 0x40)
/* v1 only */
#define HW_TIMROT_TIMCOUNTn(n) (0x30 + (n) * 0x40)
/* v2 only */
#define HW_TIMROT_RUNNING_COUNTn(n) (0x30 + (n) * 0x40)
#define HW_TIMROT_FIXED_COUNTn(n) (0x40 + (n) * 0x40)
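/*
 * Worked example for the 0x40 stride described above (illustrative only):
 * HW_TIMROT_TIMCTRLn(1) = 0x20 + 1 * 0x40 = 0x60, which addresses timrot1
 * on MX28 (v2, four registers per instance) but timrot2 on MX23 (v1, two
 * registers per instance).
 */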
#define BM_TIMROT_TIMCTRLn_RELOAD (1 << 6)
#define BM_TIMROT_TIMCTRLn_UPDATE (1 << 7)
#define BM_TIMROT_TIMCTRLn_IRQ_EN (1 << 14)
#define BM_TIMROT_TIMCTRLn_IRQ (1 << 15)
#define BP_TIMROT_TIMCTRLn_SELECT 0
#define BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL 0x8
#define BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL 0xb
#define BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS 0xf
static struct clock_event_device mxs_clockevent_device;
static void __iomem *mxs_timrot_base;
static u32 timrot_major_version;
static inline void timrot_irq_disable(void)
{
__raw_writel(BM_TIMROT_TIMCTRLn_IRQ_EN, mxs_timrot_base +
HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
}
static inline void timrot_irq_enable(void)
{
__raw_writel(BM_TIMROT_TIMCTRLn_IRQ_EN, mxs_timrot_base +
HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_SET);
}
static void timrot_irq_acknowledge(void)
{
__raw_writel(BM_TIMROT_TIMCTRLn_IRQ, mxs_timrot_base +
HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
}
static u64 timrotv1_get_cycles(struct clocksource *cs)
{
return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
& 0xffff0000) >> 16);
}
static int timrotv1_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
/* timrot decrements the count */
__raw_writel(evt, mxs_timrot_base + HW_TIMROT_TIMCOUNTn(0));
return 0;
}
static int timrotv2_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
/* timrot decrements the count */
__raw_writel(evt, mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(0));
return 0;
}
static irqreturn_t mxs_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
timrot_irq_acknowledge();
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void mxs_irq_clear(char *state)
{
/* Disable interrupt in timer module */
timrot_irq_disable();
/* Set event time into the furthest future */
if (timrot_is_v1())
__raw_writel(0xffff, mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
else
__raw_writel(0xffffffff,
mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
/* Clear pending interrupt */
timrot_irq_acknowledge();
pr_debug("%s: changing mode to %s\n", __func__, state);
}
static int mxs_shutdown(struct clock_event_device *evt)
{
mxs_irq_clear("shutdown");
return 0;
}
static int mxs_set_oneshot(struct clock_event_device *evt)
{
if (clockevent_state_oneshot(evt))
mxs_irq_clear("oneshot");
timrot_irq_enable();
return 0;
}
static struct clock_event_device mxs_clockevent_device = {
.name = "mxs_timrot",
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = mxs_shutdown,
.set_state_oneshot = mxs_set_oneshot,
.tick_resume = mxs_shutdown,
.set_next_event = timrotv2_set_next_event,
.rating = 200,
};
static int __init mxs_clockevent_init(struct clk *timer_clk)
{
if (timrot_is_v1())
mxs_clockevent_device.set_next_event = timrotv1_set_next_event;
mxs_clockevent_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&mxs_clockevent_device,
clk_get_rate(timer_clk),
timrot_is_v1() ? 0xf : 0x2,
timrot_is_v1() ? 0xfffe : 0xfffffffe);
return 0;
}
static struct clocksource clocksource_mxs = {
.name = "mxs_timer",
.rating = 200,
.read = timrotv1_get_cycles,
.mask = CLOCKSOURCE_MASK(16),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static u64 notrace mxs_read_sched_clock_v2(void)
{
return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
}
static int __init mxs_clocksource_init(struct clk *timer_clk)
{
unsigned int c = clk_get_rate(timer_clk);
if (timrot_is_v1())
clocksource_register_hz(&clocksource_mxs, c);
else {
clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
"mxs_timer", c, 200, 32, clocksource_mmio_readl_down);
sched_clock_register(mxs_read_sched_clock_v2, 32, c);
}
return 0;
}
static int __init mxs_timer_init(struct device_node *np)
{
struct clk *timer_clk;
int irq, ret;
mxs_timrot_base = of_iomap(np, 0);
WARN_ON(!mxs_timrot_base);
timer_clk = of_clk_get(np, 0);
if (IS_ERR(timer_clk)) {
pr_err("%s: failed to get clk\n", __func__);
return PTR_ERR(timer_clk);
}
ret = clk_prepare_enable(timer_clk);
if (ret)
return ret;
/*
* Initialize timers to a known state
*/
stmp_reset_block(mxs_timrot_base + HW_TIMROT_ROTCTRL);
/* get timrot version */
timrot_major_version = __raw_readl(mxs_timrot_base +
(of_device_is_compatible(np, "fsl,imx23-timrot") ?
MX23_TIMROT_VERSION_OFFSET :
MX28_TIMROT_VERSION_OFFSET));
timrot_major_version >>= BP_TIMROT_MAJOR_VERSION;
/* one for clock_event */
__raw_writel((timrot_is_v1() ?
BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) |
BM_TIMROT_TIMCTRLn_UPDATE |
BM_TIMROT_TIMCTRLn_IRQ_EN,
mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
/* another for clocksource */
__raw_writel((timrot_is_v1() ?
BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) |
BM_TIMROT_TIMCTRLn_RELOAD,
mxs_timrot_base + HW_TIMROT_TIMCTRLn(1));
/* set clocksource timer fixed count to the maximum */
if (timrot_is_v1())
__raw_writel(0xffff,
mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
else
__raw_writel(0xffffffff,
mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
/* init and register the timer to the framework */
ret = mxs_clocksource_init(timer_clk);
if (ret)
return ret;
ret = mxs_clockevent_init(timer_clk);
if (ret)
return ret;
/* Make irqs happen */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
return -EINVAL;
return request_irq(irq, mxs_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"MXS Timer Tick", &mxs_clockevent_device);
}
TIMER_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
| linux-master | drivers/clocksource/mxs_timer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2019 - All Rights Reserved
* Authors: Benjamin Gaignard <[email protected]> for STMicroelectronics.
* Pascal Paillet <[email protected]> for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/mfd/stm32-lptimer.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#define CFGR_PSC_OFFSET 9
#define STM32_LP_RATING 1000
#define STM32_TARGET_CLKRATE (32000 * HZ)
#define STM32_LP_MAX_PSC 7
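/*
 * Worked numbers (illustrative only): with HZ = 100 the target rate above is
 * 3.2 MHz, so e.g. a 32.768 kHz LPTIM clock needs no prescaling and one
 * jiffy corresponds to roughly 328 timer ticks (see
 * stm32_clkevent_lp_set_prescaler() below).
 */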
struct stm32_lp_private {
struct regmap *reg;
struct clock_event_device clkevt;
unsigned long period;
struct device *dev;
};
static struct stm32_lp_private*
to_priv(struct clock_event_device *clkevt)
{
return container_of(clkevt, struct stm32_lp_private, clkevt);
}
static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
{
struct stm32_lp_private *priv = to_priv(clkevt);
regmap_write(priv->reg, STM32_LPTIM_CR, 0);
regmap_write(priv->reg, STM32_LPTIM_IER, 0);
/* clear pending flags */
regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF);
return 0;
}
static int stm32_clkevent_lp_set_timer(unsigned long evt,
struct clock_event_device *clkevt,
int is_periodic)
{
struct stm32_lp_private *priv = to_priv(clkevt);
	/* disable LPTIMER to be able to write into IER register */
regmap_write(priv->reg, STM32_LPTIM_CR, 0);
/* enable ARR interrupt */
regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
/* enable LPTIMER to be able to write into ARR register */
regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
/* set next event counter */
regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
/* start counter */
if (is_periodic)
regmap_write(priv->reg, STM32_LPTIM_CR,
STM32_LPTIM_CNTSTRT | STM32_LPTIM_ENABLE);
else
regmap_write(priv->reg, STM32_LPTIM_CR,
STM32_LPTIM_SNGSTRT | STM32_LPTIM_ENABLE);
return 0;
}
static int stm32_clkevent_lp_set_next_event(unsigned long evt,
struct clock_event_device *clkevt)
{
return stm32_clkevent_lp_set_timer(evt, clkevt,
clockevent_state_periodic(clkevt));
}
static int stm32_clkevent_lp_set_periodic(struct clock_event_device *clkevt)
{
struct stm32_lp_private *priv = to_priv(clkevt);
return stm32_clkevent_lp_set_timer(priv->period, clkevt, true);
}
static int stm32_clkevent_lp_set_oneshot(struct clock_event_device *clkevt)
{
struct stm32_lp_private *priv = to_priv(clkevt);
return stm32_clkevent_lp_set_timer(priv->period, clkevt, false);
}
static irqreturn_t stm32_clkevent_lp_irq_handler(int irq, void *dev_id)
{
struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
struct stm32_lp_private *priv = to_priv(clkevt);
regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF);
if (clkevt->event_handler)
clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
static void stm32_clkevent_lp_set_prescaler(struct stm32_lp_private *priv,
unsigned long *rate)
{
int i;
	/*
	 * Pick the smallest power-of-two prescaler that brings the timer
	 * clock under STM32_TARGET_CLKRATE, capped at the hardware maximum.
	 */
	for (i = 0; i <= STM32_LP_MAX_PSC; i++) {
if (DIV_ROUND_CLOSEST(*rate, 1 << i) < STM32_TARGET_CLKRATE)
break;
}
regmap_write(priv->reg, STM32_LPTIM_CFGR, i << CFGR_PSC_OFFSET);
/* Adjust rate and period given the prescaler value */
*rate = DIV_ROUND_CLOSEST(*rate, (1 << i));
priv->period = DIV_ROUND_UP(*rate, HZ);
}
static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
struct device_node *np, unsigned long rate)
{
priv->clkevt.name = np->full_name;
priv->clkevt.cpumask = cpu_possible_mask;
priv->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
priv->clkevt.set_state_shutdown = stm32_clkevent_lp_shutdown;
priv->clkevt.set_state_periodic = stm32_clkevent_lp_set_periodic;
priv->clkevt.set_state_oneshot = stm32_clkevent_lp_set_oneshot;
priv->clkevt.set_next_event = stm32_clkevent_lp_set_next_event;
priv->clkevt.rating = STM32_LP_RATING;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
}
static int stm32_clkevent_lp_probe(struct platform_device *pdev)
{
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
struct stm32_lp_private *priv;
unsigned long rate;
int ret, irq;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->reg = ddata->regmap;
ret = clk_prepare_enable(ddata->clk);
if (ret)
return -EINVAL;
rate = clk_get_rate(ddata->clk);
if (!rate) {
ret = -EINVAL;
goto out_clk_disable;
}
irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
if (irq <= 0) {
ret = irq;
goto out_clk_disable;
}
if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
ret = device_init_wakeup(&pdev->dev, true);
if (ret)
goto out_clk_disable;
ret = dev_pm_set_wake_irq(&pdev->dev, irq);
if (ret)
goto out_clk_disable;
}
ret = devm_request_irq(&pdev->dev, irq, stm32_clkevent_lp_irq_handler,
IRQF_TIMER, pdev->name, &priv->clkevt);
if (ret)
goto out_clk_disable;
stm32_clkevent_lp_set_prescaler(priv, &rate);
stm32_clkevent_lp_init(priv, pdev->dev.parent->of_node, rate);
priv->dev = &pdev->dev;
return 0;
out_clk_disable:
clk_disable_unprepare(ddata->clk);
return ret;
}
static const struct of_device_id stm32_clkevent_lp_of_match[] = {
{ .compatible = "st,stm32-lptimer-timer", },
{},
};
MODULE_DEVICE_TABLE(of, stm32_clkevent_lp_of_match);
static struct platform_driver stm32_clkevent_lp_driver = {
.probe = stm32_clkevent_lp_probe,
.driver = {
.name = "stm32-lptimer-timer",
.of_match_table = stm32_clkevent_lp_of_match,
.suppress_bind_attrs = true,
},
};
module_platform_driver(stm32_clkevent_lp_driver);
MODULE_ALIAS("platform:stm32-lptimer-timer");
MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
| linux-master | drivers/clocksource/timer-stm32-lp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ADGS1408/ADGS1409 SPI MUX driver
*
* Copyright 2018 Analog Devices Inc.
*/
#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#define ADGS1408_SW_DATA (0x01)
#define ADGS1408_REG_READ(reg) ((reg) | 0x80)
#define ADGS1408_DISABLE (0x00)
#define ADGS1408_MUX(state) (((state) << 1) | 1)
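/*
 * Worked example (illustrative only): selecting state 3 writes
 * ADGS1408_MUX(3) = (3 << 1) | 1 = 0x07 to the ADGS1408_SW_DATA register,
 * while ADGS1408_DISABLE (0x00) disconnects the mux again.
 */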
enum adgs1408_chip_id {
ADGS1408 = 1,
ADGS1409,
};
static int adgs1408_spi_reg_write(struct spi_device *spi,
u8 reg_addr, u8 reg_data)
{
u8 tx_buf[2];
tx_buf[0] = reg_addr;
tx_buf[1] = reg_data;
return spi_write_then_read(spi, tx_buf, sizeof(tx_buf), NULL, 0);
}
static int adgs1408_set(struct mux_control *mux, int state)
{
struct spi_device *spi = to_spi_device(mux->chip->dev.parent);
u8 reg;
if (state == MUX_IDLE_DISCONNECT)
reg = ADGS1408_DISABLE;
else
reg = ADGS1408_MUX(state);
return adgs1408_spi_reg_write(spi, ADGS1408_SW_DATA, reg);
}
static const struct mux_control_ops adgs1408_ops = {
.set = adgs1408_set,
};
static int adgs1408_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
enum adgs1408_chip_id chip_id;
struct mux_chip *mux_chip;
struct mux_control *mux;
s32 idle_state;
int ret;
chip_id = (enum adgs1408_chip_id)device_get_match_data(dev);
if (!chip_id)
chip_id = spi_get_device_id(spi)->driver_data;
mux_chip = devm_mux_chip_alloc(dev, 1, 0);
if (IS_ERR(mux_chip))
return PTR_ERR(mux_chip);
mux_chip->ops = &adgs1408_ops;
ret = adgs1408_spi_reg_write(spi, ADGS1408_SW_DATA, ADGS1408_DISABLE);
if (ret < 0)
return ret;
ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
if (ret < 0)
idle_state = MUX_IDLE_AS_IS;
mux = mux_chip->mux;
if (chip_id == ADGS1408)
mux->states = 8;
else
mux->states = 4;
switch (idle_state) {
case MUX_IDLE_DISCONNECT:
case MUX_IDLE_AS_IS:
case 0 ... 7:
/* adgs1409 supports only 4 states */
if (idle_state < mux->states) {
mux->idle_state = idle_state;
break;
}
fallthrough;
default:
dev_err(dev, "invalid idle-state %d\n", idle_state);
return -EINVAL;
}
return devm_mux_chip_register(dev, mux_chip);
}
static const struct spi_device_id adgs1408_spi_id[] = {
{ "adgs1408", ADGS1408 },
{ "adgs1409", ADGS1409 },
{ }
};
MODULE_DEVICE_TABLE(spi, adgs1408_spi_id);
static const struct of_device_id adgs1408_of_match[] = {
{ .compatible = "adi,adgs1408", .data = (void *)ADGS1408, },
{ .compatible = "adi,adgs1409", .data = (void *)ADGS1409, },
{ }
};
MODULE_DEVICE_TABLE(of, adgs1408_of_match);
static struct spi_driver adgs1408_driver = {
.driver = {
.name = "adgs1408",
.of_match_table = adgs1408_of_match,
},
.probe = adgs1408_probe,
.id_table = adgs1408_spi_id,
};
module_spi_driver(adgs1408_driver);
MODULE_AUTHOR("Mircea Caprioru <[email protected]>");
MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mux/adgs1408.c |
// SPDX-License-Identifier: GPL-2.0
/*
* GPIO-controlled multiplexer driver
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <[email protected]>
*/
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
#include <linux/platform_device.h>
#include <linux/property.h>
struct mux_gpio {
struct gpio_descs *gpios;
};
static int mux_gpio_set(struct mux_control *mux, int state)
{
struct mux_gpio *mux_gpio = mux_chip_priv(mux->chip);
DECLARE_BITMAP(values, BITS_PER_TYPE(state));
u32 value = state;
	/* Each mux GPIO line drives one bit of the binary-encoded state. */
	bitmap_from_arr32(values, &value, BITS_PER_TYPE(value));
gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
mux_gpio->gpios->desc,
mux_gpio->gpios->info, values);
return 0;
}
static const struct mux_control_ops mux_gpio_ops = {
.set = mux_gpio_set,
};
static const struct of_device_id mux_gpio_dt_ids[] = {
{ .compatible = "gpio-mux", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mux_gpio_dt_ids);
static int mux_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mux_chip *mux_chip;
struct mux_gpio *mux_gpio;
int pins;
s32 idle_state;
int ret;
pins = gpiod_count(dev, "mux");
if (pins < 0)
return pins;
mux_chip = devm_mux_chip_alloc(dev, 1, sizeof(*mux_gpio));
if (IS_ERR(mux_chip))
return PTR_ERR(mux_chip);
mux_gpio = mux_chip_priv(mux_chip);
mux_chip->ops = &mux_gpio_ops;
mux_gpio->gpios = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
if (IS_ERR(mux_gpio->gpios))
return dev_err_probe(dev, PTR_ERR(mux_gpio->gpios),
"failed to get gpios\n");
WARN_ON(pins != mux_gpio->gpios->ndescs);
mux_chip->mux->states = BIT(pins);
ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
if (ret >= 0 && idle_state != MUX_IDLE_AS_IS) {
if (idle_state < 0 || idle_state >= mux_chip->mux->states) {
dev_err(dev, "invalid idle-state %u\n", idle_state);
return -EINVAL;
}
mux_chip->mux->idle_state = idle_state;
}
ret = devm_mux_chip_register(dev, mux_chip);
if (ret < 0)
return ret;
dev_info(dev, "%u-way mux-controller registered\n",
mux_chip->mux->states);
return 0;
}
static struct platform_driver mux_gpio_driver = {
.driver = {
.name = "gpio-mux",
.of_match_table = mux_gpio_dt_ids,
},
.probe = mux_gpio_probe,
};
module_platform_driver(mux_gpio_driver);
MODULE_DESCRIPTION("GPIO-controlled multiplexer driver");
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mux/gpio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Multiplexer driver for Analog Devices ADG792A/G Triple 4:1 mux
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <[email protected]>
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
#include <linux/property.h>
#define ADG792A_LDSW BIT(0)
#define ADG792A_RESETB BIT(1)
#define ADG792A_DISABLE(mux) (0x50 | (mux))
#define ADG792A_DISABLE_ALL (0x5f)
#define ADG792A_MUX(mux, state) (0xc0 | (((mux) + 1) << 2) | (state))
#define ADG792A_MUX_ALL(state) (0xc0 | (state))
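/*
 * Worked example (illustrative only): routing state 2 on mux controller 1
 * sends ADG792A_MUX(1, 2) = 0xc0 | ((1 + 1) << 2) | 2 = 0xca, while
 * ADG792A_MUX_ALL(2) = 0xc2 switches all three muxes to state 2 at once.
 */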
static int adg792a_write_cmd(struct i2c_client *i2c, u8 cmd, int reset)
{
u8 data = ADG792A_RESETB | ADG792A_LDSW;
/* ADG792A_RESETB is active low, the chip resets when it is zero. */
if (reset)
data &= ~ADG792A_RESETB;
return i2c_smbus_write_byte_data(i2c, cmd, data);
}
static int adg792a_set(struct mux_control *mux, int state)
{
struct i2c_client *i2c = to_i2c_client(mux->chip->dev.parent);
u8 cmd;
if (mux->chip->controllers == 1) {
/* parallel mux controller operation */
if (state == MUX_IDLE_DISCONNECT)
cmd = ADG792A_DISABLE_ALL;
else
cmd = ADG792A_MUX_ALL(state);
} else {
unsigned int controller = mux_control_get_index(mux);
if (state == MUX_IDLE_DISCONNECT)
cmd = ADG792A_DISABLE(controller);
else
cmd = ADG792A_MUX(controller, state);
}
return adg792a_write_cmd(i2c, cmd, 0);
}
static const struct mux_control_ops adg792a_ops = {
.set = adg792a_set,
};
static int adg792a_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct mux_chip *mux_chip;
s32 idle_state[3];
u32 cells;
int ret;
int i;
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
ret = device_property_read_u32(dev, "#mux-control-cells", &cells);
if (ret < 0)
return ret;
if (cells >= 2)
return -EINVAL;
mux_chip = devm_mux_chip_alloc(dev, cells ? 3 : 1, 0);
if (IS_ERR(mux_chip))
return PTR_ERR(mux_chip);
mux_chip->ops = &adg792a_ops;
ret = adg792a_write_cmd(i2c, ADG792A_DISABLE_ALL, 1);
if (ret < 0)
return ret;
ret = device_property_read_u32_array(dev, "idle-state",
(u32 *)idle_state,
mux_chip->controllers);
if (ret < 0) {
idle_state[0] = MUX_IDLE_AS_IS;
idle_state[1] = MUX_IDLE_AS_IS;
idle_state[2] = MUX_IDLE_AS_IS;
}
for (i = 0; i < mux_chip->controllers; ++i) {
struct mux_control *mux = &mux_chip->mux[i];
mux->states = 4;
switch (idle_state[i]) {
case MUX_IDLE_DISCONNECT:
case MUX_IDLE_AS_IS:
case 0 ... 4:
mux->idle_state = idle_state[i];
break;
default:
dev_err(dev, "invalid idle-state %d\n", idle_state[i]);
return -EINVAL;
}
}
ret = devm_mux_chip_register(dev, mux_chip);
if (ret < 0)
return ret;
if (cells)
dev_info(dev, "3x single pole quadruple throw muxes registered\n");
else
dev_info(dev, "triple pole quadruple throw mux registered\n");
return 0;
}
static const struct i2c_device_id adg792a_id[] = {
{ .name = "adg792a", },
{ .name = "adg792g", },
{ }
};
MODULE_DEVICE_TABLE(i2c, adg792a_id);
static const struct of_device_id adg792a_of_match[] = {
{ .compatible = "adi,adg792a", },
{ .compatible = "adi,adg792g", },
{ }
};
MODULE_DEVICE_TABLE(of, adg792a_of_match);
static struct i2c_driver adg792a_driver = {
.driver = {
.name = "adg792a",
.of_match_table = of_match_ptr(adg792a_of_match),
},
.probe = adg792a_probe,
.id_table = adg792a_id,
};
module_i2c_driver(adg792a_driver);
MODULE_DESCRIPTION("Analog Devices ADG792A/G Triple 4:1 mux driver");
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mux/adg792a.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Multiplexer subsystem
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <[email protected]>
*/
#define pr_fmt(fmt) "mux-core: " fmt
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/mux/driver.h>
#include <linux/of.h>
#include <linux/slab.h>
/*
* The idle-as-is "state" is not an actual state that may be selected, it
* only implies that the state should not be changed. So, use that state
* as indication that the cached state of the multiplexer is unknown.
*/
#define MUX_CACHE_UNKNOWN MUX_IDLE_AS_IS
/**
* struct mux_state - Represents a mux controller state specific to a given
* consumer.
* @mux: Pointer to a mux controller.
* @state: State of the mux to be selected.
*
* This structure is specific to the consumer that acquires it and has
* information specific to that consumer.
*/
struct mux_state {
struct mux_control *mux;
unsigned int state;
};
static struct class mux_class = {
.name = "mux",
};
static DEFINE_IDA(mux_ida);
static int __init mux_init(void)
{
ida_init(&mux_ida);
return class_register(&mux_class);
}
static void __exit mux_exit(void)
{
class_unregister(&mux_class);
ida_destroy(&mux_ida);
}
static void mux_chip_release(struct device *dev)
{
struct mux_chip *mux_chip = to_mux_chip(dev);
ida_simple_remove(&mux_ida, mux_chip->id);
kfree(mux_chip);
}
static const struct device_type mux_type = {
.name = "mux-chip",
.release = mux_chip_release,
};
/**
* mux_chip_alloc() - Allocate a mux-chip.
* @dev: The parent device implementing the mux interface.
* @controllers: The number of mux controllers to allocate for this chip.
* @sizeof_priv: Size of extra memory area for private use by the caller.
*
* After allocating the mux-chip with the desired number of mux controllers
* but before registering the chip, the mux driver is required to configure
* the number of valid mux states in the mux_chip->mux[N].states members and
* the desired idle state in the returned mux_chip->mux[N].idle_state members.
* The default idle state is MUX_IDLE_AS_IS. The mux driver also needs to
* provide a pointer to the operations struct in the mux_chip->ops member
* before registering the mux-chip with mux_chip_register.
*
* Return: A pointer to the new mux-chip, or an ERR_PTR with a negative errno.
*/
struct mux_chip *mux_chip_alloc(struct device *dev,
unsigned int controllers, size_t sizeof_priv)
{
struct mux_chip *mux_chip;
int i;
if (WARN_ON(!dev || !controllers))
return ERR_PTR(-EINVAL);
mux_chip = kzalloc(sizeof(*mux_chip) +
controllers * sizeof(*mux_chip->mux) +
sizeof_priv, GFP_KERNEL);
if (!mux_chip)
return ERR_PTR(-ENOMEM);
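	/*
	 * Single allocation layout: the array of mux_control structures
	 * starts right after the mux_chip structure, and the caller's
	 * private area (sizeof_priv bytes) follows that array.
	 */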
mux_chip->mux = (struct mux_control *)(mux_chip + 1);
mux_chip->dev.class = &mux_class;
mux_chip->dev.type = &mux_type;
mux_chip->dev.parent = dev;
mux_chip->dev.of_node = dev->of_node;
dev_set_drvdata(&mux_chip->dev, mux_chip);
mux_chip->id = ida_simple_get(&mux_ida, 0, 0, GFP_KERNEL);
if (mux_chip->id < 0) {
int err = mux_chip->id;
pr_err("muxchipX failed to get a device id\n");
kfree(mux_chip);
return ERR_PTR(err);
}
dev_set_name(&mux_chip->dev, "muxchip%d", mux_chip->id);
mux_chip->controllers = controllers;
for (i = 0; i < controllers; ++i) {
struct mux_control *mux = &mux_chip->mux[i];
mux->chip = mux_chip;
sema_init(&mux->lock, 1);
mux->cached_state = MUX_CACHE_UNKNOWN;
mux->idle_state = MUX_IDLE_AS_IS;
mux->last_change = ktime_get();
}
device_initialize(&mux_chip->dev);
return mux_chip;
}
EXPORT_SYMBOL_GPL(mux_chip_alloc);
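/*
 * Provider-side sketch of the documented alloc/configure/register flow
 * (illustrative; foo_mux_ops, foo_mux_set and foo_priv are placeholders,
 * not part of this file):
 *
 *	static const struct mux_control_ops foo_mux_ops = {
 *		.set = foo_mux_set,
 *	};
 *
 *	mux_chip = mux_chip_alloc(dev, 1, sizeof(struct foo_priv));
 *	if (IS_ERR(mux_chip))
 *		return PTR_ERR(mux_chip);
 *
 *	mux_chip->ops = &foo_mux_ops;
 *	mux_chip->mux[0].states = 4;
 *	mux_chip->mux[0].idle_state = 0;
 *	ret = mux_chip_register(mux_chip);
 *	if (ret < 0)
 *		mux_chip_free(mux_chip);
 */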
static int mux_control_set(struct mux_control *mux, int state)
{
int ret = mux->chip->ops->set(mux, state);
mux->cached_state = ret < 0 ? MUX_CACHE_UNKNOWN : state;
if (ret >= 0)
mux->last_change = ktime_get();
return ret;
}
/**
* mux_chip_register() - Register a mux-chip, thus readying the controllers
* for use.
* @mux_chip: The mux-chip to register.
*
 * If registration fails, do not retry it with the same mux-chip; instead,
 * put the chip away with mux_chip_free() and allocate a new one before
 * trying again.
*
* Return: Zero on success or a negative errno on error.
*/
int mux_chip_register(struct mux_chip *mux_chip)
{
int i;
int ret;
for (i = 0; i < mux_chip->controllers; ++i) {
struct mux_control *mux = &mux_chip->mux[i];
if (mux->idle_state == mux->cached_state)
continue;
ret = mux_control_set(mux, mux->idle_state);
if (ret < 0) {
dev_err(&mux_chip->dev, "unable to set idle state\n");
return ret;
}
}
ret = device_add(&mux_chip->dev);
if (ret < 0)
dev_err(&mux_chip->dev,
"device_add failed in %s: %d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL_GPL(mux_chip_register);
/**
* mux_chip_unregister() - Take the mux-chip off-line.
* @mux_chip: The mux-chip to unregister.
*
 * mux_chip_unregister() reverses the effects of mux_chip_register(), but
 * not completely: do not call mux_chip_register() again on a mux-chip that
 * has already been registered once.
*/
void mux_chip_unregister(struct mux_chip *mux_chip)
{
device_del(&mux_chip->dev);
}
EXPORT_SYMBOL_GPL(mux_chip_unregister);
/**
* mux_chip_free() - Free the mux-chip for good.
* @mux_chip: The mux-chip to free.
*
* mux_chip_free() reverses the effects of mux_chip_alloc().
*/
void mux_chip_free(struct mux_chip *mux_chip)
{
if (!mux_chip)
return;
put_device(&mux_chip->dev);
}
EXPORT_SYMBOL_GPL(mux_chip_free);
static void devm_mux_chip_release(struct device *dev, void *res)
{
struct mux_chip *mux_chip = *(struct mux_chip **)res;
mux_chip_free(mux_chip);
}
/**
* devm_mux_chip_alloc() - Resource-managed version of mux_chip_alloc().
* @dev: The parent device implementing the mux interface.
* @controllers: The number of mux controllers to allocate for this chip.
* @sizeof_priv: Size of extra memory area for private use by the caller.
*
* See mux_chip_alloc() for more details.
*
* Return: A pointer to the new mux-chip, or an ERR_PTR with a negative errno.
*/
struct mux_chip *devm_mux_chip_alloc(struct device *dev,
unsigned int controllers,
size_t sizeof_priv)
{
struct mux_chip **ptr, *mux_chip;
ptr = devres_alloc(devm_mux_chip_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
mux_chip = mux_chip_alloc(dev, controllers, sizeof_priv);
if (IS_ERR(mux_chip)) {
devres_free(ptr);
return mux_chip;
}
*ptr = mux_chip;
devres_add(dev, ptr);
return mux_chip;
}
EXPORT_SYMBOL_GPL(devm_mux_chip_alloc);
static void devm_mux_chip_reg_release(struct device *dev, void *res)
{
struct mux_chip *mux_chip = *(struct mux_chip **)res;
mux_chip_unregister(mux_chip);
}
/**
 * devm_mux_chip_register() - Resource-managed version of mux_chip_register().
* @dev: The parent device implementing the mux interface.
* @mux_chip: The mux-chip to register.
*
* See mux_chip_register() for more details.
*
* Return: Zero on success or a negative errno on error.
*/
int devm_mux_chip_register(struct device *dev,
struct mux_chip *mux_chip)
{
struct mux_chip **ptr;
int res;
ptr = devres_alloc(devm_mux_chip_reg_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
res = mux_chip_register(mux_chip);
if (res) {
devres_free(ptr);
return res;
}
*ptr = mux_chip;
devres_add(dev, ptr);
return res;
}
EXPORT_SYMBOL_GPL(devm_mux_chip_register);
/**
* mux_control_states() - Query the number of multiplexer states.
* @mux: The mux-control to query.
*
* Return: The number of multiplexer states.
*/
unsigned int mux_control_states(struct mux_control *mux)
{
return mux->states;
}
EXPORT_SYMBOL_GPL(mux_control_states);
/*
* The mux->lock must be down when calling this function.
*/
static int __mux_control_select(struct mux_control *mux, int state)
{
int ret;
if (WARN_ON(state < 0 || state >= mux->states))
return -EINVAL;
if (mux->cached_state == state)
return 0;
ret = mux_control_set(mux, state);
if (ret >= 0)
return 0;
/* The mux update failed, try to revert if appropriate... */
if (mux->idle_state != MUX_IDLE_AS_IS)
mux_control_set(mux, mux->idle_state);
return ret;
}
static void mux_control_delay(struct mux_control *mux, unsigned int delay_us)
{
ktime_t delayend;
s64 remaining;
if (!delay_us)
return;
delayend = ktime_add_us(mux->last_change, delay_us);
remaining = ktime_us_delta(delayend, ktime_get());
if (remaining > 0)
fsleep(remaining);
}
/**
* mux_control_select_delay() - Select the given multiplexer state.
* @mux: The mux-control to request a change of state from.
* @state: The new requested state.
* @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-control state, it will be locked until
* there is a call to mux_control_deselect(). If the mux-control is already
* selected when mux_control_select() is called, the caller will be blocked
* until mux_control_deselect() or mux_state_deselect() is called (by someone
* else).
*
* Therefore, make sure to call mux_control_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
* mux_control_deselect() if mux_control_select() fails.
*
* Return: 0 when the mux-control state has the requested state or a negative
* errno on error.
*/
int mux_control_select_delay(struct mux_control *mux, unsigned int state,
unsigned int delay_us)
{
int ret;
ret = down_killable(&mux->lock);
if (ret < 0)
return ret;
ret = __mux_control_select(mux, state);
if (ret >= 0)
mux_control_delay(mux, delay_us);
if (ret < 0)
up(&mux->lock);
return ret;
}
EXPORT_SYMBOL_GPL(mux_control_select_delay);
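/*
 * Consumer usage sketch (illustrative; the state value and the work done
 * while selected are placeholders). mux_control_select() is the zero-delay
 * wrapper for this function declared in <linux/mux/consumer.h>:
 *
 *	ret = mux_control_select(mux, 1);
 *	if (ret < 0)
 *		return ret;
 *
 *	(access whatever is routed through mux position 1)
 *
 *	mux_control_deselect(mux);
 */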
/**
* mux_state_select_delay() - Select the given multiplexer state.
* @mstate: The mux-state to select.
* @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-state, its mux-control will be locked
* until there is a call to mux_state_deselect(). If the mux-control is already
* selected when mux_state_select() is called, the caller will be blocked
* until mux_state_deselect() or mux_control_deselect() is called (by someone
* else).
*
* Therefore, make sure to call mux_state_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
* mux_state_deselect() if mux_state_select() fails.
*
* Return: 0 when the mux-state has been selected or a negative
* errno on error.
*/
int mux_state_select_delay(struct mux_state *mstate, unsigned int delay_us)
{
return mux_control_select_delay(mstate->mux, mstate->state, delay_us);
}
EXPORT_SYMBOL_GPL(mux_state_select_delay);
/**
* mux_control_try_select_delay() - Try to select the given multiplexer state.
* @mux: The mux-control to request a change of state from.
* @state: The new requested state.
* @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-control state, it will be locked until
* mux_control_deselect() is called.
*
* Therefore, make sure to call mux_control_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
* mux_control_deselect() if mux_control_try_select() fails.
*
* Return: 0 when the mux-control state has the requested state or a negative
* errno on error. Specifically -EBUSY if the mux-control is contended.
*/
int mux_control_try_select_delay(struct mux_control *mux, unsigned int state,
unsigned int delay_us)
{
int ret;
if (down_trylock(&mux->lock))
return -EBUSY;
ret = __mux_control_select(mux, state);
if (ret >= 0)
mux_control_delay(mux, delay_us);
if (ret < 0)
up(&mux->lock);
return ret;
}
EXPORT_SYMBOL_GPL(mux_control_try_select_delay);
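/*
 * Non-blocking sketch (illustrative): the try variant returns -EBUSY
 * instead of sleeping when the mux-control is held by another consumer,
 * which suits paths that must not wait for the mux. On -EBUSY the caller
 * is expected to retry later; other negative values are real errors.
 * mux_control_try_select() is the zero-delay wrapper declared in
 * <linux/mux/consumer.h>:
 *
 *	ret = mux_control_try_select(mux, 2);
 *	if (ret < 0)
 *		return ret;
 *
 *	(perform the muxed access)
 *
 *	mux_control_deselect(mux);
 */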
/**
* mux_state_try_select_delay() - Try to select the given multiplexer state.
* @mstate: The mux-state to select.
* @delay_us: The time to delay (in microseconds) if the mux state is changed.
*
* On successfully selecting the mux-state, its mux-control will be locked
* until mux_state_deselect() is called.
*
* Therefore, make sure to call mux_state_deselect() when the operation is
* complete and the mux-control is free for others to use, but do not call
* mux_state_deselect() if mux_state_try_select() fails.
*
* Return: 0 when the mux-state has been selected or a negative errno on
* error. Specifically -EBUSY if the mux-control is contended.
*/
int mux_state_try_select_delay(struct mux_state *mstate, unsigned int delay_us)
{
return mux_control_try_select_delay(mstate->mux, mstate->state, delay_us);
}
EXPORT_SYMBOL_GPL(mux_state_try_select_delay);
/**
* mux_control_deselect() - Deselect the previously selected multiplexer state.
* @mux: The mux-control to deselect.
*
* It is required that a single call is made to mux_control_deselect() for
* each and every successful call made to either of mux_control_select() or
* mux_control_try_select().
*
* Return: 0 on success and a negative errno on error. An error can only
* occur if the mux has an idle state. Note that even if an error occurs, the
* mux-control is unlocked and is thus free for the next access.
*/
int mux_control_deselect(struct mux_control *mux)
{
int ret = 0;
if (mux->idle_state != MUX_IDLE_AS_IS &&
mux->idle_state != mux->cached_state)
ret = mux_control_set(mux, mux->idle_state);
up(&mux->lock);
return ret;
}
EXPORT_SYMBOL_GPL(mux_control_deselect);
/**
* mux_state_deselect() - Deselect the previously selected multiplexer state.
* @mstate: The mux-state to deselect.
*
* It is required that a single call is made to mux_state_deselect() for
* each and every successful call made to either of mux_state_select() or
* mux_state_try_select().
*
* Return: 0 on success and a negative errno on error. An error can only
* occur if the mux has an idle state. Note that even if an error occurs, the
* mux-control is unlocked and is thus free for the next access.
*/
int mux_state_deselect(struct mux_state *mstate)
{
return mux_control_deselect(mstate->mux);
}
EXPORT_SYMBOL_GPL(mux_state_deselect);
/* Note this function returns a reference to the mux_chip dev. */
static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
{
struct device *dev;
dev = class_find_device_by_of_node(&mux_class, np);
return dev ? to_mux_chip(dev) : NULL;
}
/*
* mux_get() - Get the mux-control for a device.
* @dev: The device that needs a mux-control.
* @mux_name: The name identifying the mux-control.
* @state: Pointer to where the requested state is returned, or NULL when
* the required multiplexer states are handled by other means.
*
* Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
*/
static struct mux_control *mux_get(struct device *dev, const char *mux_name,
unsigned int *state)
{
struct device_node *np = dev->of_node;
struct of_phandle_args args;
struct mux_chip *mux_chip;
unsigned int controller;
int index = 0;
int ret;
if (mux_name) {
if (state)
index = of_property_match_string(np, "mux-state-names",
mux_name);
else
index = of_property_match_string(np, "mux-control-names",
mux_name);
if (index < 0) {
dev_err(dev, "mux controller '%s' not found\n",
mux_name);
return ERR_PTR(index);
}
}
if (state)
ret = of_parse_phandle_with_args(np,
"mux-states", "#mux-state-cells",
index, &args);
else
ret = of_parse_phandle_with_args(np,
"mux-controls", "#mux-control-cells",
index, &args);
if (ret) {
dev_err(dev, "%pOF: failed to get mux-%s %s(%i)\n",
np, state ? "state" : "control", mux_name ?: "", index);
return ERR_PTR(ret);
}
mux_chip = of_find_mux_chip_by_node(args.np);
of_node_put(args.np);
if (!mux_chip)
return ERR_PTR(-EPROBE_DEFER);
controller = 0;
if (state) {
if (args.args_count > 2 || args.args_count == 0 ||
(args.args_count < 2 && mux_chip->controllers > 1)) {
dev_err(dev, "%pOF: wrong #mux-state-cells for %pOF\n",
np, args.np);
put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
if (args.args_count == 2) {
controller = args.args[0];
*state = args.args[1];
} else {
*state = args.args[0];
}
} else {
if (args.args_count > 1 ||
(!args.args_count && mux_chip->controllers > 1)) {
dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
np, args.np);
put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
if (args.args_count)
controller = args.args[0];
}
if (controller >= mux_chip->controllers) {
dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
np, controller, args.np);
put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
return &mux_chip->mux[controller];
}
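/*
 * Illustrative device tree fragments for the two lookup paths handled
 * above; node names, labels and compatibles are examples only:
 *
 * Consumer of a mux-control (parsed via "mux-controls", one cell selecting
 * the controller when the provider has more than one):
 *
 *	adc-mux {
 *		mux-controls = <&mux 0>;
 *		mux-control-names = "adc";
 *	};
 *
 * Consumer of a mux-state (parsed via "mux-states"); with two cells the
 * first picks the controller and the second the state to select:
 *
 *	phy {
 *		mux-states = <&mux 1 2>;
 *		mux-state-names = "lane";
 *	};
 */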
/**
* mux_control_get() - Get the mux-control for a device.
* @dev: The device that needs a mux-control.
* @mux_name: The name identifying the mux-control.
*
* Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
*/
struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
{
return mux_get(dev, mux_name, NULL);
}
EXPORT_SYMBOL_GPL(mux_control_get);
/**
* mux_control_put() - Put away the mux-control for good.
* @mux: The mux-control to put away.
*
* mux_control_put() reverses the effects of mux_control_get().
*/
void mux_control_put(struct mux_control *mux)
{
put_device(&mux->chip->dev);
}
EXPORT_SYMBOL_GPL(mux_control_put);
static void devm_mux_control_release(struct device *dev, void *res)
{
struct mux_control *mux = *(struct mux_control **)res;
mux_control_put(mux);
}
/**
* devm_mux_control_get() - Get the mux-control for a device, with resource
* management.
* @dev: The device that needs a mux-control.
* @mux_name: The name identifying the mux-control.
*
* Return: Pointer to the mux-control, or an ERR_PTR with a negative errno.
*/
struct mux_control *devm_mux_control_get(struct device *dev,
const char *mux_name)
{
struct mux_control **ptr, *mux;
ptr = devres_alloc(devm_mux_control_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
mux = mux_control_get(dev, mux_name);
if (IS_ERR(mux)) {
devres_free(ptr);
return mux;
}
*ptr = mux;
devres_add(dev, ptr);
return mux;
}
EXPORT_SYMBOL_GPL(devm_mux_control_get);
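/*
 * Consumer probe sketch (illustrative; "adc" is a made-up mux-control name
 * and foo_probe is not part of this file):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mux_control *mux;
 *
 *		mux = devm_mux_control_get(&pdev->dev, "adc");
 *		if (IS_ERR(mux))
 *			return PTR_ERR(mux);
 *
 *		(stash mux and use it with mux_control_select/deselect)
 *		return 0;
 *	}
 *
 * An ERR_PTR(-EPROBE_DEFER) return simply means the referenced mux-chip has
 * not been registered yet; the driver core will retry the probe.
 */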
/*
* mux_state_get() - Get the mux-state for a device.
* @dev: The device that needs a mux-state.
* @mux_name: The name identifying the mux-state.
*
* Return: A pointer to the mux-state, or an ERR_PTR with a negative errno.
*/
static struct mux_state *mux_state_get(struct device *dev, const char *mux_name)
{
struct mux_state *mstate;
mstate = kzalloc(sizeof(*mstate), GFP_KERNEL);
if (!mstate)
return ERR_PTR(-ENOMEM);
mstate->mux = mux_get(dev, mux_name, &mstate->state);
if (IS_ERR(mstate->mux)) {
int err = PTR_ERR(mstate->mux);
kfree(mstate);
return ERR_PTR(err);
}
return mstate;
}
/*
* mux_state_put() - Put away the mux-state for good.
* @mstate: The mux-state to put away.
*
* mux_state_put() reverses the effects of mux_state_get().
*/
static void mux_state_put(struct mux_state *mstate)
{
mux_control_put(mstate->mux);
kfree(mstate);
}
static void devm_mux_state_release(struct device *dev, void *res)
{
struct mux_state *mstate = *(struct mux_state **)res;
mux_state_put(mstate);
}
/**
* devm_mux_state_get() - Get the mux-state for a device, with resource
* management.
 * @dev: The device that needs a mux-state.
 * @mux_name: The name identifying the mux-state.
*
* Return: Pointer to the mux-state, or an ERR_PTR with a negative errno.
*/
struct mux_state *devm_mux_state_get(struct device *dev,
const char *mux_name)
{
struct mux_state **ptr, *mstate;
ptr = devres_alloc(devm_mux_state_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
mstate = mux_state_get(dev, mux_name);
if (IS_ERR(mstate)) {
devres_free(ptr);
return mstate;
}
*ptr = mstate;
devres_add(dev, ptr);
return mstate;
}
EXPORT_SYMBOL_GPL(devm_mux_state_get);
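/*
 * Mux-state consumer sketch (illustrative): a mux_state bundles a
 * mux-control with the state parsed from the "mux-states" phandle
 * arguments, so the consumer never passes a state value itself.
 * mux_state_select() is the zero-delay wrapper in <linux/mux/consumer.h>:
 *
 *	mstate = devm_mux_state_get(dev, NULL);
 *	if (IS_ERR(mstate))
 *		return PTR_ERR(mstate);
 *
 *	ret = mux_state_select(mstate);
 *	if (ret < 0)
 *		return ret;
 *
 *	(perform the access that needs this particular routing)
 *
 *	mux_state_deselect(mstate);
 */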
/*
* Using subsys_initcall instead of module_init here to try to ensure - for
* the non-modular case - that the subsystem is initialized when mux consumers
* and mux controllers start to use it.
* For the modular case, the ordering is ensured with module dependencies.
*/
subsys_initcall(mux_init);
module_exit(mux_exit);
MODULE_DESCRIPTION("Multiplexer subsystem");
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mux/core.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* MMIO register bitfield-controlled multiplexer driver
*
* Copyright (C) 2017 Pengutronix, Philipp Zabel <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
static int mux_mmio_set(struct mux_control *mux, int state)
{
struct regmap_field **fields = mux_chip_priv(mux->chip);
return regmap_field_write(fields[mux_control_get_index(mux)], state);
}
static const struct mux_control_ops mux_mmio_ops = {
.set = mux_mmio_set,
};
static const struct of_device_id mux_mmio_dt_ids[] = {
{ .compatible = "mmio-mux", },
{ .compatible = "reg-mux", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
static int mux_mmio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct regmap_field **fields;
struct mux_chip *mux_chip;
struct regmap *regmap;
int num_fields;
int ret;
int i;
if (of_device_is_compatible(np, "mmio-mux"))
regmap = syscon_node_to_regmap(np->parent);
else
regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(dev, "failed to get regmap: %d\n", ret);
return ret;
}
ret = of_property_count_u32_elems(np, "mux-reg-masks");
if (ret == 0 || ret % 2)
ret = -EINVAL;
if (ret < 0) {
dev_err(dev, "mux-reg-masks property missing or invalid: %d\n",
ret);
return ret;
}
num_fields = ret / 2;
mux_chip = devm_mux_chip_alloc(dev, num_fields, num_fields *
sizeof(*fields));
if (IS_ERR(mux_chip))
return PTR_ERR(mux_chip);
fields = mux_chip_priv(mux_chip);
for (i = 0; i < num_fields; i++) {
struct mux_control *mux = &mux_chip->mux[i];
struct reg_field field;
s32 idle_state = MUX_IDLE_AS_IS;
u32 reg, mask;
int bits;
ret = of_property_read_u32_index(np, "mux-reg-masks",
						 2 * i, &reg);
if (!ret)
ret = of_property_read_u32_index(np, "mux-reg-masks",
2 * i + 1, &mask);
if (ret < 0) {
dev_err(dev, "bitfield %d: failed to read mux-reg-masks property: %d\n",
i, ret);
return ret;
}
field.reg = reg;
field.msb = fls(mask) - 1;
field.lsb = ffs(mask) - 1;
if (mask != GENMASK(field.msb, field.lsb)) {
dev_err(dev, "bitfield %d: invalid mask 0x%x\n",
i, mask);
return -EINVAL;
}
fields[i] = devm_regmap_field_alloc(dev, regmap, field);
if (IS_ERR(fields[i])) {
ret = PTR_ERR(fields[i]);
dev_err(dev, "bitfield %d: failed allocate: %d\n",
i, ret);
return ret;
}
bits = 1 + field.msb - field.lsb;
mux->states = 1 << bits;
of_property_read_u32_index(np, "idle-states", i,
(u32 *)&idle_state);
if (idle_state != MUX_IDLE_AS_IS) {
if (idle_state < 0 || idle_state >= mux->states) {
dev_err(dev, "bitfield: %d: out of range idle state %d\n",
i, idle_state);
return -EINVAL;
}
mux->idle_state = idle_state;
}
}
mux_chip->ops = &mux_mmio_ops;
return devm_mux_chip_register(dev, mux_chip);
}
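/*
 * Illustrative device tree fragment; property values are examples only,
 * MUX_IDLE_AS_IS is the constant from <dt-bindings/mux/mux.h>, and for the
 * "mmio-mux" compatible the node is expected to sit under a syscon parent
 * (the regmap is taken from that parent above):
 *
 *	mux: mux-controller {
 *		compatible = "mmio-mux";
 *		#mux-control-cells = <1>;
 *
 *		mux-reg-masks = <0x3 0x30>, <0x3 0x40>;
 *		idle-states = <MUX_IDLE_AS_IS 0>;
 *	};
 *
 * For the first entry, mask 0x30 gives msb = fls(0x30) - 1 = 5 and
 * lsb = ffs(0x30) - 1 = 4, i.e. a 2-bit field and 1 << 2 = 4 states; the
 * second entry is a single bit at position 6, so 2 states.
 */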
static struct platform_driver mux_mmio_driver = {
.driver = {
.name = "mmio-mux",
.of_match_table = mux_mmio_dt_ids,
},
.probe = mux_mmio_probe,
};
module_platform_driver(mux_mmio_driver);
MODULE_DESCRIPTION("MMIO register bitfield-controlled multiplexer driver");
MODULE_AUTHOR("Philipp Zabel <[email protected]>");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/mux/mmio.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* TI AM33XX EMIF PM Assembly Offsets
*
* Copyright (C) 2016-2017 Texas Instruments Inc.
*/
#include <linux/ti-emif-sram.h>
int main(void)
{
ti_emif_asm_offsets();
return 0;
}
|
linux-master
|
drivers/memory/emif-asm-offsets.c
|